Nov 29 06:33:44 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 29 06:33:44 crc restorecon[4762]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 29 06:33:44 crc restorecon[4762]: 
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc 
restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc 
restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 
crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 
06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 
06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 06:33:44 crc 
restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 
06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 
06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 06:33:44 crc restorecon[4762]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:44 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 06:33:45 crc restorecon[4762]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 29 06:33:45 crc kubenswrapper[4943]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 29 06:33:45 crc kubenswrapper[4943]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 29 06:33:45 crc kubenswrapper[4943]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 29 06:33:45 crc kubenswrapper[4943]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 29 06:33:45 crc kubenswrapper[4943]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 29 06:33:45 crc kubenswrapper[4943]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.172878 4943 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176266 4943 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176296 4943 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176304 4943 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176308 4943 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176313 4943 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176317 4943 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176321 4943 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176326 4943 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176339 4943 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176345 4943 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176352 4943 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176357 4943 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176363 4943 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176369 4943 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176375 4943 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176382 4943 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176387 4943 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176392 4943 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176397 4943 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176402 4943 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176406 4943 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176411 4943 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176415 4943 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176420 4943 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176425 4943 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176429 4943 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176434 4943 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176439 4943 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176444 4943 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176448 4943 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176453 4943 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176457 4943 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176465 4943 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176472 4943 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176477 4943 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176482 4943 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176488 4943 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176492 4943 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176499 4943 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176504 4943 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176509 4943 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176513 4943 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176520 4943 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176527 4943 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176532 4943 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176537 4943 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176543 4943 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176548 4943 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176553 4943 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176560 4943 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176585 4943 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176590 4943 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176595 4943 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176599 4943 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176603 4943 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176607 4943 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176611 4943 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176615 4943 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176619 4943 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176623 4943 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176627 4943 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176631 4943 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176635 4943 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176639 4943 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176643 4943 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176648 4943 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176652 4943 feature_gate.go:330] unrecognized feature gate: Example
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176656 4943 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176660 4943 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176667 4943 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.176671 4943 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176767 4943 flags.go:64] FLAG: --address="0.0.0.0"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176778 4943 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176788 4943 flags.go:64] FLAG: --anonymous-auth="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176795 4943 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176802 4943 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176807 4943 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176814 4943 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176821 4943 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176827 4943 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176832 4943 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176839 4943 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176845 4943 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176850 4943 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176856 4943 flags.go:64] FLAG: --cgroup-root=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176861 4943 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176866 4943 flags.go:64] FLAG: --client-ca-file=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176870 4943 flags.go:64] FLAG: --cloud-config=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176875 4943 flags.go:64] FLAG: --cloud-provider=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176880 4943 flags.go:64] FLAG: --cluster-dns="[]"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176886 4943 flags.go:64] FLAG: --cluster-domain=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176891 4943 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176896 4943 flags.go:64] FLAG: --config-dir=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176901 4943 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176908 4943 flags.go:64] FLAG: --container-log-max-files="5"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176916 4943 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176921 4943 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176926 4943 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176932 4943 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176937 4943 flags.go:64] FLAG: --contention-profiling="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176942 4943 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176947 4943 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176953 4943 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176958 4943 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176965 4943 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176970 4943 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176974 4943 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176980 4943 flags.go:64] FLAG: --enable-load-reader="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176985 4943 flags.go:64] FLAG: --enable-server="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176990 4943 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.176998 4943 flags.go:64] FLAG: --event-burst="100"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177003 4943 flags.go:64] FLAG: --event-qps="50"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177008 4943 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177015 4943 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177020 4943 flags.go:64] FLAG: --eviction-hard=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177026 4943 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177032 4943 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177038 4943 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177044 4943 flags.go:64] FLAG: --eviction-soft=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177049 4943 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177054 4943 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177059 4943 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177065 4943 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177070 4943 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177260 4943 flags.go:64] FLAG: --fail-swap-on="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177271 4943 flags.go:64] FLAG: --feature-gates=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177279 4943 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177285 4943 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177290 4943 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177296 4943 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177301 4943 flags.go:64] FLAG: --healthz-port="10248"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177306 4943 flags.go:64] FLAG: --help="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177311 4943 flags.go:64] FLAG: --hostname-override=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177317 4943 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177322 4943 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177327 4943 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177332 4943 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177337 4943 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177343 4943 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177347 4943 flags.go:64] FLAG: --image-service-endpoint=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177352 4943 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177357 4943 flags.go:64] FLAG: --kube-api-burst="100"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177362 4943 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177367 4943 flags.go:64] FLAG: --kube-api-qps="50"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177372 4943 flags.go:64] FLAG: --kube-reserved=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177378 4943 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177384 4943 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177389 4943 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177394 4943 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177399 4943 flags.go:64] FLAG: --lock-file=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177404 4943 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177409 4943 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177414 4943 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177422 4943 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177428 4943 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177432 4943 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177438 4943 flags.go:64] FLAG: --logging-format="text"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177443 4943 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177448 4943 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177453 4943 flags.go:64] FLAG: --manifest-url=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177458 4943 flags.go:64] FLAG: --manifest-url-header=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177466 4943 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177471 4943 flags.go:64] FLAG: --max-open-files="1000000"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177478 4943 flags.go:64] FLAG: --max-pods="110"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177483 4943 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177489 4943 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177494 4943 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177499 4943 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177504 4943 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177510 4943 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177515 4943 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177527 4943 flags.go:64] FLAG: --node-status-max-images="50"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177532 4943 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177537 4943 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177541 4943 flags.go:64] FLAG: --pod-cidr=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177546 4943 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177555 4943 flags.go:64] FLAG: --pod-manifest-path=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177560 4943 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177585 4943 flags.go:64] FLAG: --pods-per-core="0"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177591 4943 flags.go:64] FLAG: --port="10250"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177595 4943 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177600 4943 flags.go:64] FLAG: --provider-id=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177606 4943 flags.go:64] FLAG: --qos-reserved=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177611 4943 flags.go:64] FLAG: --read-only-port="10255"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177616 4943 flags.go:64] FLAG: --register-node="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177622 4943 flags.go:64] FLAG: --register-schedulable="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177627 4943 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177636 4943 flags.go:64] FLAG: --registry-burst="10"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177641 4943 flags.go:64] FLAG: --registry-qps="5"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177646 4943 flags.go:64] FLAG: --reserved-cpus=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177651 4943 flags.go:64] FLAG: --reserved-memory=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177657 4943 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177662 4943 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177667 4943 flags.go:64] FLAG: --rotate-certificates="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177671 4943 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177676 4943 flags.go:64] FLAG: --runonce="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177682 4943 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177688 4943 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177693 4943 flags.go:64] FLAG: --seccomp-default="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177698 4943 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177703 4943 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177708 4943 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177713 4943 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177718 4943 flags.go:64] FLAG: --storage-driver-password="root"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177723 4943 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177728 4943 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177733 4943 flags.go:64] FLAG: --storage-driver-user="root"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177738 4943 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177743 4943 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177748 4943 flags.go:64] FLAG: --system-cgroups=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177754 4943 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177764 4943 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177768 4943 flags.go:64] FLAG: --tls-cert-file=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177773 4943 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177780 4943 flags.go:64] FLAG: --tls-min-version=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177785 4943 flags.go:64] FLAG: --tls-private-key-file=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177790 4943 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177795 4943 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177802 4943 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177807 4943 flags.go:64] FLAG: --v="2"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177813 4943 flags.go:64] FLAG: --version="false"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177820 4943 flags.go:64] FLAG: --vmodule=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177826 4943 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.177832 4943 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
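[Editor's sketch] For context on the --system-reserved, --kube-reserved, and --enforce-node-allocatable values in the flag dump above: the kubelet derives node allocatable as capacity minus kube-reserved, system-reserved, and hard-eviction reservations. The following is an illustrative Go sketch of that arithmetic (not kubelet code), using the apimachinery resource package; the 33654128640-byte memory capacity is taken from the cadvisor Machine line later in this log, and the zero reservations mirror the empty --kube-reserved/--eviction-hard flags here:

package main

// Illustrative node-allocatable arithmetic:
//   allocatable = capacity - systemReserved - kubeReserved - evictionHard

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	capacity := resource.MustParse("33654128640") // MemoryCapacity from manager.go:217 below
	reservations := []resource.Quantity{
		resource.MustParse("350Mi"), // --system-reserved memory=350Mi
		resource.MustParse("0"),     // --kube-reserved is empty on this node
		resource.MustParse("0"),     // --eviction-hard is empty on this node
	}

	allocatable := capacity.DeepCopy()
	for _, r := range reservations {
		allocatable.Sub(r)
	}
	// 33654128640 - 367001600 = 33287127040 bytes available for pods.
	fmt.Println("allocatable memory (bytes):", allocatable.String())
}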
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.188871 4943 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.188913 4943 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
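[Editor's sketch] The flood of "unrecognized feature gate" warnings earlier in this log, and the effective "feature gates: {map[...]}" summary just below, both come from feature-gate parsing: OpenShift hands its cluster-wide gate list to a registry that only knows the upstream kubelet names, and the wrapper warns on the unknown ones instead of failing. A minimal sketch of that behavior, assuming the upstream k8s.io/component-base/featuregate API:

package main

// Minimal feature-gate registry demo: known names can be set, unknown
// (OpenShift-only) names such as GatewayAPI are rejected with the same
// "unrecognized feature gate" message seen in this log.

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	gates := featuregate.NewFeatureGate()

	// Register a gate the registry knows about, as kubelet does for its
	// upstream feature set.
	if err := gates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"KMSv1": {Default: false, PreRelease: featuregate.Deprecated},
	}); err != nil {
		panic(err)
	}

	// Setting a known gate succeeds (with a deprecation warning, as above).
	fmt.Println(gates.SetFromMap(map[string]bool{"KMSv1": true})) // <nil>

	// An unknown name fails: "unrecognized feature gate: GatewayAPI".
	fmt.Println(gates.SetFromMap(map[string]bool{"GatewayAPI": true}))

	fmt.Println("KMSv1 enabled:", gates.Enabled("KMSv1")) // true
}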
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.191445 4943 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.191632 4943 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.194343 4943 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.194422 4943 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.194872 4943 server.go:997] "Starting client certificate rotation"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.194889 4943 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.195067 4943 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-25 17:55:11.603621636 +0000 UTC
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.195158 4943 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 635h21m26.408466237s for next certificate rotation
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.200895 4943 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.202436 4943 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.212181 4943 log.go:25] "Validated CRI v1 runtime API"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.235026 4943 log.go:25] "Validated CRI v1 image API"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.237769 4943 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.241828 4943 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-29-06-29-09-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.241900 4943 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.261158 4943 manager.go:217] Machine: {Timestamp:2025-11-29 06:33:45.258522925 +0000 UTC m=+0.188611698 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:7b86982e-0266-40f9-aa86-203795c8126b BootID:56e5e96f-c824-4364-b35c-8cd0f292a058 Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:92:c6:bb Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:92:c6:bb Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:b4:3c:5d Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:68:b9:c4 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:60:af:ac Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:b2:b2:4e Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:1c:e9:9a Speed:-1 Mtu:1496} {Name:eth10 MacAddress:7e:ca:a2:a2:24:fe Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:ee:2d:b0:35:30:29 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.261488 4943 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.261722 4943 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.262327 4943 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.262503 4943 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.262542 4943 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.262827 4943 topology_manager.go:138] "Creating topology manager with none policy"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.262840 4943 container_manager_linux.go:303] "Creating device plugin manager"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.263040 4943 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.263119 4943 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.263506 4943 state_mem.go:36] "Initialized new in-memory state store"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.263624 4943 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.264663 4943 kubelet.go:418] "Attempting to sync node with API server"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.264688 4943 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.264719 4943 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.264737 4943 kubelet.go:324] "Adding apiserver pod source"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.264752 4943 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.267458 4943 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.267846 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.267876 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.267995 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.267942 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.268003 4943 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.269352 4943 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270192 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270228 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270237 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270244 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270260 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270271 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270279 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270295 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270304 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270319 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270356 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270365 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.270613 4943 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.271356 4943 server.go:1280] "Started kubelet"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.271636 4943 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.271761 4943 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.271760 4943 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.272679 4943 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 29 06:33:45 crc systemd[1]: Started Kubernetes Kubelet.
Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.273478 4943 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.148:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c66a7e2b25e55 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-29 06:33:45.271303765 +0000 UTC m=+0.201392528,LastTimestamp:2025-11-29 06:33:45.271303765 +0000 UTC m=+0.201392528,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.274292 4943 server.go:460] "Adding debug handlers to kubelet server"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.275295 4943 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.275339 4943 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.275377 4943 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 12:56:35.079375793 +0000 UTC
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.275431 4943 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 558h22m49.803948814s for next certificate rotation
Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.275585 4943 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.275994 4943 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.276012 4943 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.276085 4943 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.276590 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="200ms"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.277209 4943 factory.go:55] Registering systemd factory
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.277236 4943 factory.go:221] Registration of the systemd container factory successfully
Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.282498 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.282622 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.282715 4943 factory.go:153] Registering CRI-O factory
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.284119 4943 factory.go:221] Registration of the crio container factory successfully
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.284294 4943 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.284352 4943 factory.go:103] Registering Raw factory
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.284377 4943 manager.go:1196] Started watching for new ooms in manager
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.285748 4943 manager.go:319] Starting recovery of all containers
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292111 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292175 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292199 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292218 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292236 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292255 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292272 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292291 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292310 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292326 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292343 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292360 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292377 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292397 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292414 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292436 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292455 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292473 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292491 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292509 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292526 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292544 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292561 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292613 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292632 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292650 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292711 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292731 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292750 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292769 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292788 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292837 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292858 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292879 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292898 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292916 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292934 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292953 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.292999 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293017 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293035 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293053 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293073 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293091 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293108 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293163 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293184 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293210 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293228 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293246 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293265 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293285 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293312 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293332 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293352 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293371 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293391 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293410 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293427 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293444 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293463 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293483 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293501 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293521 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293541 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293558 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293601 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293617 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293638 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293656 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293673 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293690 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293709 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293725 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293743 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293761 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293780 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293801 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293820 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293841 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293858 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293878 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293897 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293917 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293940 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293958 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293979 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.293999 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294021 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294038 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294058 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294080 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294099 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294118 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294137 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294158 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294176 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294195 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294212 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294232 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294250 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294267 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294284 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294302 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294329 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294348 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294368 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294389 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294410 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294434 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294453 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294483 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294504 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294525 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294545 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294588 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294606 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294623 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294642 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294661 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294678 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294697 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294714 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294734 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294754 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294772 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294791 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294810 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294829 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294848 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294866 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294885 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294906 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294927 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294946 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294967 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.294988 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295008 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295026 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295045 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295063 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb"
volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295083 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295101 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295123 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295178 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295197 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295216 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295236 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295263 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295283 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295301 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295318 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" 
volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295335 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295353 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295371 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295392 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295410 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295430 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295459 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295479 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295498 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295518 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295540 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" 
seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295561 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295610 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295628 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295647 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295666 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295687 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295705 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295725 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295743 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295761 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295779 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 29 
06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295798 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.295817 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297199 4943 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297307 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297381 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297439 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297497 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297579 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297654 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297714 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297782 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297841 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297899 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.297959 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298020 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298089 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298158 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298218 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298275 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298331 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298397 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298463 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298525 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298609 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298682 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298751 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298815 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298903 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.298967 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.299024 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.299079 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.299195 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.299257 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.299318 4943 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.299373 4943 reconstruct.go:97] "Volume reconstruction finished" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.299421 4943 reconciler.go:26] "Reconciler: start to sync state" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.310357 4943 manager.go:324] Recovery completed Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.321716 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.323725 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.323775 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.323785 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.324210 4943 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.324820 4943 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.324840 4943 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.324860 4943 state_mem.go:36] "Initialized new in-memory state store" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.326116 4943 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.326155 4943 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.326184 4943 kubelet.go:2335] "Starting kubelet main sync loop" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.326232 4943 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 29 06:33:45 crc kubenswrapper[4943]: W1129 06:33:45.326992 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.327061 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.376000 4943 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.426670 4943 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.432607 4943 policy_none.go:49] "None policy: Start" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.433615 4943 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.433645 4943 state_mem.go:35] "Initializing new in-memory state store" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.476464 4943 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.477614 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="400ms" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.495642 4943 manager.go:334] "Starting Device Plugin manager" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.495694 4943 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.495708 4943 server.go:79] "Starting device plugin registration server" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.496090 4943 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.496105 4943 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.496480 4943 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.496773 4943 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 
06:33:45.496784 4943 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.502339 4943 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.597076 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.599287 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.599347 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.599362 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.599401 4943 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.600381 4943 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.627158 4943 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.627362 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.628749 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.628791 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.628801 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.628929 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.629377 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.629479 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.629723 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.629815 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.629832 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.630099 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.630253 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.630295 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631076 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631122 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631248 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631259 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631219 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631122 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631409 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631423 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631373 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631597 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631716 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.631756 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632320 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632358 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632369 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632629 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632636 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632656 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632742 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632779 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.632745 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.633790 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.633819 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.633829 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.633859 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.633887 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.633901 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.634219 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.634256 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.635012 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.635040 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.635052 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.704711 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.704769 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.704800 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.704832 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.704858 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.704886 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.704907 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.704933 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.705014 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.705124 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.705166 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.705267 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.705340 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.705363 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.705387 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.801128 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.802601 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.802648 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 
06:33:45.802656 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.802681 4943 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.803166 4943 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806396 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806461 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806501 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806531 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806556 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806600 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806625 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806626 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:45 crc 
kubenswrapper[4943]: I1129 06:33:45.806647 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806678 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806701 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806701 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806737 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806749 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806769 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806792 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806599 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806759 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 
06:33:45.806753 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806796 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806762 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806932 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806816 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806819 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806901 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806975 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806990 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.807035 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.807044 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.806804 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: E1129 06:33:45.878263 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="800ms" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.968013 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 29 06:33:45 crc kubenswrapper[4943]: I1129 06:33:45.992340 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.009236 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.017455 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:46 crc kubenswrapper[4943]: W1129 06:33:46.020350 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-9c61b0ba8da894729a719d7c4de34218bc46140442892a2f97029d44936a66e6 WatchSource:0}: Error finding container 9c61b0ba8da894729a719d7c4de34218bc46140442892a2f97029d44936a66e6: Status 404 returned error can't find the container with id 9c61b0ba8da894729a719d7c4de34218bc46140442892a2f97029d44936a66e6 Nov 29 06:33:46 crc kubenswrapper[4943]: W1129 06:33:46.021405 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-b68558364c0d8f3a377958b943acb23b41614b94e6fb780ae779bb1259f0e0b7 WatchSource:0}: Error finding container b68558364c0d8f3a377958b943acb23b41614b94e6fb780ae779bb1259f0e0b7: Status 404 returned error can't find the container with id b68558364c0d8f3a377958b943acb23b41614b94e6fb780ae779bb1259f0e0b7 Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.022316 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:46 crc kubenswrapper[4943]: W1129 06:33:46.030452 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-599cbe44f8a99c4c3606eee484188654e6da3b649cebb408126483e4510d0c1c WatchSource:0}: Error finding container 599cbe44f8a99c4c3606eee484188654e6da3b649cebb408126483e4510d0c1c: Status 404 returned error can't find the container with id 599cbe44f8a99c4c3606eee484188654e6da3b649cebb408126483e4510d0c1c Nov 29 06:33:46 crc kubenswrapper[4943]: W1129 06:33:46.032725 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-4fee2fa4e1691fee6a7de20fe7e11b7455be851f5862c5068484df016ae94c90 WatchSource:0}: Error finding container 4fee2fa4e1691fee6a7de20fe7e11b7455be851f5862c5068484df016ae94c90: Status 404 returned error can't find the container with id 4fee2fa4e1691fee6a7de20fe7e11b7455be851f5862c5068484df016ae94c90 Nov 29 06:33:46 crc kubenswrapper[4943]: W1129 06:33:46.175039 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 29 06:33:46 crc kubenswrapper[4943]: E1129 06:33:46.175133 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.203463 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.204979 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.205025 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.205042 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.205078 4943 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 06:33:46 crc kubenswrapper[4943]: E1129 06:33:46.205521 4943 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc" Nov 29 06:33:46 crc kubenswrapper[4943]: W1129 06:33:46.231619 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 29 06:33:46 crc kubenswrapper[4943]: E1129 06:33:46.231705 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get 
\"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 29 06:33:46 crc kubenswrapper[4943]: W1129 06:33:46.244578 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 29 06:33:46 crc kubenswrapper[4943]: E1129 06:33:46.244676 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.272926 4943 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.330342 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"599cbe44f8a99c4c3606eee484188654e6da3b649cebb408126483e4510d0c1c"} Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.331495 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b68558364c0d8f3a377958b943acb23b41614b94e6fb780ae779bb1259f0e0b7"} Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.332433 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9c61b0ba8da894729a719d7c4de34218bc46140442892a2f97029d44936a66e6"} Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.333594 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"439957c3ba1808651456beb49cf434c1ab1149823ce820745a56edf61d45b564"} Nov 29 06:33:46 crc kubenswrapper[4943]: I1129 06:33:46.334631 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4fee2fa4e1691fee6a7de20fe7e11b7455be851f5862c5068484df016ae94c90"} Nov 29 06:33:46 crc kubenswrapper[4943]: W1129 06:33:46.645790 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 29 06:33:46 crc kubenswrapper[4943]: E1129 06:33:46.646188 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" 
logger="UnhandledError" Nov 29 06:33:46 crc kubenswrapper[4943]: E1129 06:33:46.679634 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="1.6s" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.005873 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.007304 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.007349 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.007361 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.007387 4943 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 06:33:47 crc kubenswrapper[4943]: E1129 06:33:47.007893 4943 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.272672 4943 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.338741 4943 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c" exitCode=0 Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.338806 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c"} Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.338855 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.339706 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.339735 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.339744 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.343356 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e"} Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.343397 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2"} Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.343409 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78"} Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.343418 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a"} Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.343424 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.348549 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.348597 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.348606 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.349221 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68" exitCode=0 Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.349284 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68"} Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.349288 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.350120 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.350145 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.350155 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.352239 4943 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842" exitCode=0 Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.352324 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.352327 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842"} Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.353367 4943 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.353415 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.353424 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.354328 4943 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1" exitCode=0 Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.354362 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1"} Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.354428 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.355615 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.356508 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.356529 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.356538 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.356660 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.356728 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.356756 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.453520 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:47 crc kubenswrapper[4943]: I1129 06:33:47.459718 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.357983 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.358033 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.358040 4943 kubelet_node_status.go:401] "Setting node annotation to enable 
volume controller attach/detach" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.358047 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.358977 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.359002 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.359011 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.360757 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.360793 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.360805 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.360815 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.360823 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.360828 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.361651 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.361682 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.361694 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.362804 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"4dd0bda7516c0dffbe96e5bb9011ff88cd6d7069047000e3db8a177262385e71"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.362831 4943 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.363396 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.363427 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.363439 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.364314 4943 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16" exitCode=0 Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.364351 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16"} Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.364406 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.364416 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.365230 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.365252 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.365261 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.365261 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.365280 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.365293 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.499931 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.609061 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.610593 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.610629 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.610639 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:48 crc kubenswrapper[4943]: I1129 06:33:48.610667 4943 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 
06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.369780 4943 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3" exitCode=0 Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.369838 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3"} Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.369947 4943 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.369991 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.369998 4943 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.370008 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.370053 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.370008 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.370447 4943 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.370540 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.372063 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.372090 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.372104 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.374338 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.374423 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.374470 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.374842 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.374930 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.374980 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.375159 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:49 crc kubenswrapper[4943]: 
I1129 06:33:49.375236 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.375283 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.378742 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.378850 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:49 crc kubenswrapper[4943]: I1129 06:33:49.378879 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.123424 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.381044 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c"} Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.381085 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71"} Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.381124 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.381167 4943 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.381222 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.382070 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.382110 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.382125 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.382941 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.382998 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.383012 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.579299 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.579469 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.580732 4943 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.580763 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:50 crc kubenswrapper[4943]: I1129 06:33:50.580774 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.388280 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b"} Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.388326 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d"} Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.388337 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a"} Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.388443 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.389434 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.389481 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.389496 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.500617 4943 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.500698 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.728084 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.728231 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.729441 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.729495 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:33:51 crc kubenswrapper[4943]: I1129 06:33:51.729507 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.070177 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.188250 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.390954 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.391006 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.392043 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.392077 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.392088 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.392104 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.392142 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.392154 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.851972 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.852123 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.853176 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.853240 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:52 crc kubenswrapper[4943]: I1129 06:33:52.853254 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:53 crc kubenswrapper[4943]: I1129 06:33:53.393388 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:53 crc kubenswrapper[4943]: I1129 06:33:53.394233 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:53 crc kubenswrapper[4943]: I1129 06:33:53.394276 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:53 crc kubenswrapper[4943]: I1129 06:33:53.394287 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:55 crc kubenswrapper[4943]: E1129 06:33:55.502637 4943 
eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 29 06:33:56 crc kubenswrapper[4943]: I1129 06:33:56.657171 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:56 crc kubenswrapper[4943]: I1129 06:33:56.657406 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:56 crc kubenswrapper[4943]: I1129 06:33:56.659392 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:56 crc kubenswrapper[4943]: I1129 06:33:56.659451 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:56 crc kubenswrapper[4943]: I1129 06:33:56.659469 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:56 crc kubenswrapper[4943]: I1129 06:33:56.662610 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.019108 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.019353 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.021433 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.021482 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.021500 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.407397 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.408341 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.408393 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:33:57 crc kubenswrapper[4943]: I1129 06:33:57.408412 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:33:58 crc kubenswrapper[4943]: W1129 06:33:58.179981 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 29 06:33:58 crc kubenswrapper[4943]: I1129 06:33:58.180102 4943 trace.go:236] Trace[1047017188]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Nov-2025 06:33:48.178) (total time: 10001ms): Nov 29 06:33:58 crc kubenswrapper[4943]: Trace[1047017188]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (06:33:58.179) Nov 29 06:33:58 crc kubenswrapper[4943]: 
Trace[1047017188]: [10.001350922s] [10.001350922s] END Nov 29 06:33:58 crc kubenswrapper[4943]: E1129 06:33:58.180131 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 29 06:33:58 crc kubenswrapper[4943]: I1129 06:33:58.273795 4943 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 29 06:33:58 crc kubenswrapper[4943]: E1129 06:33:58.281513 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Nov 29 06:33:58 crc kubenswrapper[4943]: W1129 06:33:58.516747 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 29 06:33:58 crc kubenswrapper[4943]: I1129 06:33:58.516876 4943 trace.go:236] Trace[1894546014]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Nov-2025 06:33:48.515) (total time: 10001ms): Nov 29 06:33:58 crc kubenswrapper[4943]: Trace[1894546014]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (06:33:58.516) Nov 29 06:33:58 crc kubenswrapper[4943]: Trace[1894546014]: [10.001676829s] [10.001676829s] END Nov 29 06:33:58 crc kubenswrapper[4943]: E1129 06:33:58.516919 4943 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 29 06:33:58 crc kubenswrapper[4943]: E1129 06:33:58.611638 4943 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Nov 29 06:33:59 crc kubenswrapper[4943]: W1129 06:33:59.270327 4943 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 29 06:33:59 crc kubenswrapper[4943]: I1129 06:33:59.270404 4943 trace.go:236] Trace[547380226]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Nov-2025 06:33:49.268) (total time: 10001ms): Nov 29 06:33:59 crc kubenswrapper[4943]: Trace[547380226]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (06:33:59.270) Nov 29 06:33:59 crc kubenswrapper[4943]: Trace[547380226]: [10.001605677s] [10.001605677s] END Nov 29 06:33:59 crc kubenswrapper[4943]: E1129 06:33:59.270423 4943 reflector.go:158] 
"Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 29 06:33:59 crc kubenswrapper[4943]: I1129 06:33:59.655425 4943 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 29 06:33:59 crc kubenswrapper[4943]: I1129 06:33:59.655483 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 29 06:33:59 crc kubenswrapper[4943]: I1129 06:33:59.660812 4943 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 29 06:33:59 crc kubenswrapper[4943]: I1129 06:33:59.660868 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.500679 4943 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.500739 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.734739 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.735005 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.736430 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.736472 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.736482 4943 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.742023 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.811841 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.813491 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.813610 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.813641 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:01 crc kubenswrapper[4943]: I1129 06:34:01.813673 4943 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 06:34:01 crc kubenswrapper[4943]: E1129 06:34:01.817221 4943 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 29 06:34:02 crc kubenswrapper[4943]: I1129 06:34:02.048301 4943 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 29 06:34:02 crc kubenswrapper[4943]: I1129 06:34:02.421504 4943 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 06:34:02 crc kubenswrapper[4943]: I1129 06:34:02.421585 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:34:02 crc kubenswrapper[4943]: I1129 06:34:02.422462 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:02 crc kubenswrapper[4943]: I1129 06:34:02.422528 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:02 crc kubenswrapper[4943]: I1129 06:34:02.422543 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:02 crc kubenswrapper[4943]: I1129 06:34:02.512788 4943 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 29 06:34:04 crc kubenswrapper[4943]: I1129 06:34:04.312869 4943 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 29 06:34:04 crc kubenswrapper[4943]: I1129 06:34:04.662671 4943 trace.go:236] Trace[607953958]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Nov-2025 06:33:49.682) (total time: 14980ms): Nov 29 06:34:04 crc kubenswrapper[4943]: Trace[607953958]: ---"Objects listed" error: 14980ms (06:34:04.662) Nov 29 06:34:04 crc kubenswrapper[4943]: Trace[607953958]: [14.980424721s] [14.980424721s] END Nov 29 06:34:04 crc kubenswrapper[4943]: I1129 06:34:04.662704 4943 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 29 06:34:04 crc kubenswrapper[4943]: I1129 06:34:04.666940 4943 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 29 06:34:04 crc kubenswrapper[4943]: I1129 06:34:04.686482 4943 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: 
Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:42460->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 29 06:34:04 crc kubenswrapper[4943]: I1129 06:34:04.686537 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:42460->192.168.126.11:17697: read: connection reset by peer" Nov 29 06:34:04 crc kubenswrapper[4943]: I1129 06:34:04.688076 4943 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 29 06:34:04 crc kubenswrapper[4943]: I1129 06:34:04.688144 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.278184 4943 apiserver.go:52] "Watching apiserver" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.280964 4943 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.281163 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.281532 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.281600 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.281610 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.281550 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.281665 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.281811 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.281967 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.282128 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.282212 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.284654 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.284671 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.284709 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.284763 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.284888 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.284982 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.285041 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.285327 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.286886 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.323393 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.339553 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.351237 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.361751 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.372339 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.377220 4943 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.382060 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.392160 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.401550 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.410175 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.421944 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.429779 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.431919 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070" exitCode=255
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.431965 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070"}
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.433448 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.441135 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.442110 4943 scope.go:117] "RemoveContainer" containerID="c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.442270 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.455346 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.468071 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.470882 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.470920 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.470940 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.470958 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.470973 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.470988 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471003 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471019 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471033 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471050 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471066 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471081 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471095 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471112 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471127 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471143 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471159 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471195 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471224 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471251 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471269 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471285 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471300 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471315 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471329 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471357 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471385 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471400 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471416 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471430 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471464 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471478 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471494 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471508 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471524 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471574 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471599 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471620 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume
\"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471640 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471663 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471684 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471781 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471827 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471849 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471872 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471896 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471917 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471939 4943 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471935 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471961 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.471982 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472003 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472025 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472046 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472069 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472090 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472112 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod 
\"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472137 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472160 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472181 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472203 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472223 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472243 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472267 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472333 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472359 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472382 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod 
\"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472401 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472418 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472432 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472450 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472458 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472467 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472540 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472589 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472599 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472616 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472651 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472655 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472676 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472700 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472723 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472749 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472771 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472795 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472809 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: 
"serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472818 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472846 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472872 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472894 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472916 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472940 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472964 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.472986 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473007 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473032 4943 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473045 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473058 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473083 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473116 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.473138 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:05.973122983 +0000 UTC m=+20.903211726 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473156 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473158 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473177 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473194 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473214 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473232 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473248 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473269 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473286 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473303 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473316 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473324 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473346 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473364 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473381 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473400 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473417 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473434 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473455 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473475 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473493 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473510 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473514 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473525 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473533 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473542 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473613 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473639 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473706 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473731 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: 
\"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473755 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473782 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473808 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473831 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473858 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473882 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473906 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473930 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473954 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473979 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod 
\"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474004 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474030 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474052 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474076 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474104 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474127 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474152 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474208 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474233 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474254 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod 
\"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474279 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474318 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474343 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474367 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474407 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474431 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474463 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474498 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474522 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474545 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474586 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474616 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474640 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474664 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474686 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474710 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474734 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474758 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474781 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474806 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" 
(UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474829 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474854 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474879 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474903 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474927 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474952 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474975 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474999 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475021 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475046 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475069 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475093 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475116 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475140 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475181 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.476745 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.476798 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.476826 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.487350 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473730 4943 
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473772 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.473908 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474033 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474128 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474287 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474441 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.474491 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475405 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.475806 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.476036 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.493014 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.493087 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.493553 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.493624 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.491394 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494034 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.476408 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.478135 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.478577 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.488108 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.478782 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.478841 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479277 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479467 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479499 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479356 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494824 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479775 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479751 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479809 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479938 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.479957 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.480079 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.480416 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.480431 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.480499 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). 
InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.480537 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.480831 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.480962 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.481000 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.481279 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.481333 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.481339 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.481490 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.482527 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.482525 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.482781 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.483035 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.483257 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.483361 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.483448 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.483477 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). 
InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.485678 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.485729 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.485749 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.485945 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.485978 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.495043 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.485992 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.486190 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.486354 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.486519 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.486625 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.486775 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.486937 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.487530 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.487552 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.487868 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.487916 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.488366 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.488524 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.488572 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.488610 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.488875 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.489173 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). 
InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.489430 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.489452 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.489848 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.490111 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.490330 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.490531 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.490536 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.490604 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.490619 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.490756 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.490849 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.491008 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.491223 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497153 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.491277 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.491471 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.491485 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.491693 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.487303 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.476124 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.493916 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494099 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.491508 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494354 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494426 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494604 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494642 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494667 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.486123 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.495523 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.495872 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.495919 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496242 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496352 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496542 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496591 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496659 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.476351 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496669 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496716 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496776 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496776 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.496784 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497022 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494856 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.494837 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497470 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.493837 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497520 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497542 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497533 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497585 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497867 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497937 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497986 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498022 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498042 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498059 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498078 4943 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498103 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498126 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498149 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498163 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498173 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.497065 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498222 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498279 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498309 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.498339 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.499043 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.499437 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.499485 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.499895 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.499935 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.500015 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.500347 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.500457 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.501107 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.501771 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.501852 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.501878 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.501934 4943 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502119 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502213 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502293 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502329 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.502418 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:06.002402294 +0000 UTC m=+20.932491127 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502457 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502499 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502716 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502772 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502798 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502846 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.502870 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.503306 4943 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:05 crc 
kubenswrapper[4943]: E1129 06:34:05.503370 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:06.003359398 +0000 UTC m=+20.933448151 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503454 4943 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503469 4943 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503482 4943 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503496 4943 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503510 4943 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503518 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503522 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503552 4943 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503578 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503588 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503597 
4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503606 4943 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503615 4943 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503624 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503633 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503641 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503650 4943 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503658 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503669 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503679 4943 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503688 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503697 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503706 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" 
DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503715 4943 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503723 4943 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503732 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503741 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503749 4943 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503758 4943 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.503766 4943 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.504147 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.504458 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.504618 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.504672 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.504780 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.504864 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.505781 4943 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506469 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506517 4943 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506531 4943 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506546 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506837 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506875 4943 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506898 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506914 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506928 4943 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506940 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506953 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506967 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506980 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.506992 4943 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507005 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507037 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507050 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507062 4943 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507075 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507087 4943 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507100 4943 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507112 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507123 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507134 4943 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507148 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507164 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507175 4943 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507186 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507198 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507220 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507233 4943 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507247 4943 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507259 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507497 4943 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507509 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507520 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507534 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507544 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507554 4943 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507581 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507602 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507614 4943 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507625 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507636 4943 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507657 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507669 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507680 4943 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507691 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507745 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507762 4943 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507778 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507790 4943 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507802 4943 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507822 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507834 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507845 4943 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507857 4943 reconciler_common.go:293] "Volume detached for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507868 4943 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507880 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507891 4943 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507904 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507915 4943 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507928 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507941 4943 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507954 4943 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507966 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507979 4943 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.507990 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508001 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508014 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: 
\"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508028 4943 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508040 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508051 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508062 4943 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508074 4943 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508087 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508098 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508110 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508120 4943 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508133 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508144 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508155 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508170 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508182 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508194 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508205 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508217 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508228 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508239 4943 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508249 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508261 4943 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508273 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508285 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508295 4943 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508307 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508318 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508330 4943 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508340 4943 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508352 4943 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508363 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.508374 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.514266 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.515326 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29
T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.519370 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.519510 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.519631 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.519782 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.519839 4943 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.519948 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:06.019929421 +0000 UTC m=+20.950018164 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.521809 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.522083 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.522685 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.522793 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.523682 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.524131 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.524384 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.524396 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.524762 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.525098 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.525372 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.526441 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.531381 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.532788 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.532813 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.532825 4943 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:05 crc kubenswrapper[4943]: E1129 06:34:05.532874 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:06.032857624 +0000 UTC m=+20.962946377 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.532975 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.533097 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.533157 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.535471 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.535068 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.536205 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.541754 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.542028 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.542456 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.543002 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.549129 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.550369 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.550493 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). 
InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.551104 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.553341 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.555421 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.555446 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.555539 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.555823 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.555870 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.559641 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.559863 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.561299 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.561391 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.561675 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.563385 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.563572 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.563885 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.564253 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.565025 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.566080 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.575693 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.589147 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.594586 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.596001 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.609846 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614065 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614129 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614175 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614185 4943 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614194 4943 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614203 4943 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614212 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614220 4943 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614228 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614236 4943 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614246 4943 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614254 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614263 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614271 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614303 4943 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614313 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614322 4943 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614331 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614341 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614351 4943 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614363 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614374 4943 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614384 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614395 4943 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614404 4943 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614414 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614423 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614433 4943 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614442 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614452 4943 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614462 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614471 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614480 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614490 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614500 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614510 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614520 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614529 4943 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614538 4943 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614546 4943 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614554 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.614885 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.615164 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.618481 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619514 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619557 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619583 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619592 4943 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619601 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619610 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619618 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619627 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619647 4943 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619656 4943 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619667 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619676 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619685 4943 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node 
\"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619693 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619709 4943 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619717 4943 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619725 4943 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619733 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619742 4943 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619750 4943 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619758 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619768 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619777 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619786 4943 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619793 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-drzxp"] Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619796 4943 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.619947 4943 reconciler_common.go:293] "Volume detached for 
volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.620049 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-drzxp" Nov 29 06:34:05 crc kubenswrapper[4943]: W1129 06:34:05.622465 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-9b549dfbb885cbc188ef4df90a88a61d517686911b5b1e2c50eed83226b3bb58 WatchSource:0}: Error finding container 9b549dfbb885cbc188ef4df90a88a61d517686911b5b1e2c50eed83226b3bb58: Status 404 returned error can't find the container with id 9b549dfbb885cbc188ef4df90a88a61d517686911b5b1e2c50eed83226b3bb58 Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.627878 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.628055 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.628269 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.668924 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.703452 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.721324 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/43c2941f-a05b-4905-a72e-a5978229642e-hosts-file\") pod \"node-resolver-drzxp\" (UID: \"43c2941f-a05b-4905-a72e-a5978229642e\") " pod="openshift-dns/node-resolver-drzxp" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.721394 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6grpl\" (UniqueName: \"kubernetes.io/projected/43c2941f-a05b-4905-a72e-a5978229642e-kube-api-access-6grpl\") pod \"node-resolver-drzxp\" (UID: \"43c2941f-a05b-4905-a72e-a5978229642e\") " pod="openshift-dns/node-resolver-drzxp" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.734124 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.759028 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.804261 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.816123 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.822706 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/43c2941f-a05b-4905-a72e-a5978229642e-hosts-file\") pod \"node-resolver-drzxp\" (UID: \"43c2941f-a05b-4905-a72e-a5978229642e\") " pod="openshift-dns/node-resolver-drzxp" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.822745 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6grpl\" (UniqueName: \"kubernetes.io/projected/43c2941f-a05b-4905-a72e-a5978229642e-kube-api-access-6grpl\") pod \"node-resolver-drzxp\" (UID: \"43c2941f-a05b-4905-a72e-a5978229642e\") " pod="openshift-dns/node-resolver-drzxp" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.822882 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/43c2941f-a05b-4905-a72e-a5978229642e-hosts-file\") pod \"node-resolver-drzxp\" (UID: \"43c2941f-a05b-4905-a72e-a5978229642e\") " pod="openshift-dns/node-resolver-drzxp" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.828511 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.843751 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29
T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.853139 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6grpl\" (UniqueName: \"kubernetes.io/projected/43c2941f-a05b-4905-a72e-a5978229642e-kube-api-access-6grpl\") pod \"node-resolver-drzxp\" (UID: \"43c2941f-a05b-4905-a72e-a5978229642e\") " pod="openshift-dns/node-resolver-drzxp" Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.903390 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 06:34:05 crc kubenswrapper[4943]: W1129 06:34:05.912312 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-ad9a85b1ee052d5ee9b904aab5b40c910ce822378329e8f96c97fbc13a71d053 WatchSource:0}: Error finding container ad9a85b1ee052d5ee9b904aab5b40c910ce822378329e8f96c97fbc13a71d053: Status 404 returned error can't find the container with id ad9a85b1ee052d5ee9b904aab5b40c910ce822378329e8f96c97fbc13a71d053 Nov 29 06:34:05 crc kubenswrapper[4943]: I1129 06:34:05.930512 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-drzxp" Nov 29 06:34:05 crc kubenswrapper[4943]: W1129 06:34:05.953777 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43c2941f_a05b_4905_a72e_a5978229642e.slice/crio-b0d05e9df1470ffce97e14bd545076cc7458f716be5ed5f23999da6ef6fe07f2 WatchSource:0}: Error finding container b0d05e9df1470ffce97e14bd545076cc7458f716be5ed5f23999da6ef6fe07f2: Status 404 returned error can't find the container with id b0d05e9df1470ffce97e14bd545076cc7458f716be5ed5f23999da6ef6fe07f2 Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.024121 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.024203 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.024247 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.024272 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024354 4943 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024392 4943 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 
06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024408 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:07.024391772 +0000 UTC m=+21.954480525 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024499 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024517 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024523 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:07.024505165 +0000 UTC m=+21.954593978 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024529 4943 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024542 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:07.024530885 +0000 UTC m=+21.954619738 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.024582 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:07.024550966 +0000 UTC m=+21.954639829 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.125602 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.125809 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.125935 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.125995 4943 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:06 crc kubenswrapper[4943]: E1129 06:34:06.126099 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:07.126084579 +0000 UTC m=+22.056173332 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.392405 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-f4gf7"] Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.392780 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.398606 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.398828 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-ptcqh"] Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.399412 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.399985 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.405380 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.405695 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-kh8qv"] Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.405683 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.406371 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.406424 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.406544 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.406745 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.406893 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.407131 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.409592 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.411394 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.412435 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.422850 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29
T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428121 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-system-cni-dir\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428334 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-netns\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428434 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-os-release\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428531 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-kubelet\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428621 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-os-release\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428714 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-cni-bin\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428802 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcshj\" (UniqueName: \"kubernetes.io/projected/ae851705-b905-4caa-932a-345918c2d3f7-kube-api-access-vcshj\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428890 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-cni-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.428989 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-cnibin\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429076 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-cni-multus\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429176 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-hostroot\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429278 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" 
(UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429374 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9452a4f7-8768-4190-b544-50f80bc5ebf6-rootfs\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429479 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-conf-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429601 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ae851705-b905-4caa-932a-345918c2d3f7-cni-binary-copy\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429710 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-socket-dir-parent\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429788 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ca406df5-4c80-44b5-9092-4ff17b0b0c72-cni-binary-copy\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429858 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-etc-kubernetes\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.429978 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ae851705-b905-4caa-932a-345918c2d3f7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430083 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-daemon-config\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430180 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-multus-certs\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430266 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9452a4f7-8768-4190-b544-50f80bc5ebf6-mcd-auth-proxy-config\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430347 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-system-cni-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430435 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-cnibin\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430530 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9452a4f7-8768-4190-b544-50f80bc5ebf6-proxy-tls\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430642 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6gtc\" (UniqueName: \"kubernetes.io/projected/9452a4f7-8768-4190-b544-50f80bc5ebf6-kube-api-access-s6gtc\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430743 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-k8s-cni-cncf-io\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.430778 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmqqg\" (UniqueName: \"kubernetes.io/projected/ca406df5-4c80-44b5-9092-4ff17b0b0c72-kube-api-access-nmqqg\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.436379 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.437854 4943 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.438016 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.439162 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-drzxp" event={"ID":"43c2941f-a05b-4905-a72e-a5978229642e","Type":"ContainerStarted","Data":"d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.439192 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-drzxp" event={"ID":"43c2941f-a05b-4905-a72e-a5978229642e","Type":"ContainerStarted","Data":"b0d05e9df1470ffce97e14bd545076cc7458f716be5ed5f23999da6ef6fe07f2"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.439922 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ad9a85b1ee052d5ee9b904aab5b40c910ce822378329e8f96c97fbc13a71d053"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.441169 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.441198 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.441212 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9b549dfbb885cbc188ef4df90a88a61d517686911b5b1e2c50eed83226b3bb58"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.442402 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.442628 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.442669 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c92d43934d901187d920daf74b04f81c55d9259d2dddc58ac70aade2bd58bcef"} Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.455872 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.467811 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.478860 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.487955 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.497957 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.509438 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.522980 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531137 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-hostroot\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531196 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531224 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9452a4f7-8768-4190-b544-50f80bc5ebf6-rootfs\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531234 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: 
\"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-hostroot\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531293 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-conf-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531302 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9452a4f7-8768-4190-b544-50f80bc5ebf6-rootfs\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531245 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-conf-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531357 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ae851705-b905-4caa-932a-345918c2d3f7-cni-binary-copy\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531375 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-socket-dir-parent\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531391 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ca406df5-4c80-44b5-9092-4ff17b0b0c72-cni-binary-copy\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531421 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ae851705-b905-4caa-932a-345918c2d3f7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531435 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-daemon-config\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531450 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-multus-certs\") pod \"multus-kh8qv\" (UID: 
\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531463 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-etc-kubernetes\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531492 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-etc-kubernetes\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531552 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531559 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-socket-dir-parent\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531625 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-multus-certs\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531662 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9452a4f7-8768-4190-b544-50f80bc5ebf6-mcd-auth-proxy-config\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531692 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-system-cni-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531716 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-cnibin\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531736 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9452a4f7-8768-4190-b544-50f80bc5ebf6-proxy-tls\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc 
kubenswrapper[4943]: I1129 06:34:06.531770 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6gtc\" (UniqueName: \"kubernetes.io/projected/9452a4f7-8768-4190-b544-50f80bc5ebf6-kube-api-access-s6gtc\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531789 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-k8s-cni-cncf-io\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531792 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-system-cni-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531810 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmqqg\" (UniqueName: \"kubernetes.io/projected/ca406df5-4c80-44b5-9092-4ff17b0b0c72-kube-api-access-nmqqg\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531833 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-system-cni-dir\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531855 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-netns\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531886 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-os-release\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531910 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-kubelet\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.531936 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-cni-bin\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532025 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"os-release\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-os-release\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532051 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcshj\" (UniqueName: \"kubernetes.io/projected/ae851705-b905-4caa-932a-345918c2d3f7-kube-api-access-vcshj\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532071 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-cni-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532092 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-cnibin\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532124 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ae851705-b905-4caa-932a-345918c2d3f7-cni-binary-copy\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532128 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-cni-multus\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532152 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-cni-multus\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532208 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-k8s-cni-cncf-io\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532214 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-daemon-config\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532273 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ca406df5-4c80-44b5-9092-4ff17b0b0c72-cni-binary-copy\") pod 
\"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532280 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ae851705-b905-4caa-932a-345918c2d3f7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532296 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9452a4f7-8768-4190-b544-50f80bc5ebf6-mcd-auth-proxy-config\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532333 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-cnibin\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532403 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-os-release\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532432 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-system-cni-dir\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532443 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-cni-bin\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532456 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-run-netns\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532477 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-host-var-lib-kubelet\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532656 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ae851705-b905-4caa-932a-345918c2d3f7-os-release\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " 
pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532681 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-cnibin\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.532773 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ca406df5-4c80-44b5-9092-4ff17b0b0c72-multus-cni-dir\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.535295 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9452a4f7-8768-4190-b544-50f80bc5ebf6-proxy-tls\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.541149 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.554177 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6gtc\" (UniqueName: \"kubernetes.io/projected/9452a4f7-8768-4190-b544-50f80bc5ebf6-kube-api-access-s6gtc\") pod \"machine-config-daemon-f4gf7\" (UID: \"9452a4f7-8768-4190-b544-50f80bc5ebf6\") " pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.554543 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.560000 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmqqg\" (UniqueName: \"kubernetes.io/projected/ca406df5-4c80-44b5-9092-4ff17b0b0c72-kube-api-access-nmqqg\") pod \"multus-kh8qv\" (UID: \"ca406df5-4c80-44b5-9092-4ff17b0b0c72\") " pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.561406 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcshj\" (UniqueName: \"kubernetes.io/projected/ae851705-b905-4caa-932a-345918c2d3f7-kube-api-access-vcshj\") pod \"multus-additional-cni-plugins-ptcqh\" (UID: \"ae851705-b905-4caa-932a-345918c2d3f7\") " pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.571875 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.580342 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.611935 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.636874 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.652303 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.664141 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.676320 4943 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.686736 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.699386 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.708746 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.717720 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" Nov 29 06:34:06 crc kubenswrapper[4943]: W1129 06:34:06.718158 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9452a4f7_8768_4190_b544_50f80bc5ebf6.slice/crio-ea6a3ffd6c22184e94d22585862aef262a99abf3b46089c6f4cc62cbbd10e764 WatchSource:0}: Error finding container ea6a3ffd6c22184e94d22585862aef262a99abf3b46089c6f4cc62cbbd10e764: Status 404 returned error can't find the container with id ea6a3ffd6c22184e94d22585862aef262a99abf3b46089c6f4cc62cbbd10e764 Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.722789 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-kh8qv" Nov 29 06:34:06 crc kubenswrapper[4943]: W1129 06:34:06.732597 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae851705_b905_4caa_932a_345918c2d3f7.slice/crio-9784208d57d8b024aa5a82dae2321cde355a05cd455f52f199410c2a02956b65 WatchSource:0}: Error finding container 9784208d57d8b024aa5a82dae2321cde355a05cd455f52f199410c2a02956b65: Status 404 returned error can't find the container with id 9784208d57d8b024aa5a82dae2321cde355a05cd455f52f199410c2a02956b65 Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.792388 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lrsts"] Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.798538 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.803625 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.803793 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.803850 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.803925 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.803696 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.804107 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.804384 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834113 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-etc-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834149 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" 
(UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-log-socket\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834165 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-systemd\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834178 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-ovn\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834193 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/78ac9747-c331-4c4f-af69-5153d05f4097-ovn-node-metrics-cert\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834218 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-var-lib-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834233 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834249 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834264 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-netd\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834284 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfg9s\" (UniqueName: \"kubernetes.io/projected/78ac9747-c331-4c4f-af69-5153d05f4097-kube-api-access-qfg9s\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 
06:34:06.834311 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-config\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834326 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-script-lib\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834341 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-systemd-units\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834354 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-env-overrides\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834368 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-slash\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834389 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-node-log\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834405 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-netns\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834422 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-ovn-kubernetes\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.834436 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-kubelet\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.835017 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-bin\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.835007 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.854932 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.868131 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.879608 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.893523 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.904551 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 
06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.920874 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\
\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\
\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.931978 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.935829 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-netd\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.935981 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfg9s\" (UniqueName: \"kubernetes.io/projected/78ac9747-c331-4c4f-af69-5153d05f4097-kube-api-access-qfg9s\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936085 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-config\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936202 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-script-lib\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936309 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-systemd-units\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.935986 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-netd\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936405 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"systemd-units\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-systemd-units\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936464 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-env-overrides\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936574 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-slash\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936606 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-slash\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936614 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-node-log\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936641 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-netns\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936648 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-node-log\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936662 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-ovn-kubernetes\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936682 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-netns\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936690 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-kubelet\") pod \"ovnkube-node-lrsts\" (UID: 
\"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936707 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-bin\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936713 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-ovn-kubernetes\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936728 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-etc-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936742 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-log-socket\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936745 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-kubelet\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936757 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/78ac9747-c331-4c4f-af69-5153d05f4097-ovn-node-metrics-cert\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936774 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-etc-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936776 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-systemd\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936861 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-ovn\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc 
kubenswrapper[4943]: I1129 06:34:06.936881 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-log-socket\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936912 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-var-lib-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936809 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-systemd\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936932 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-ovn\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936954 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-var-lib-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936913 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-script-lib\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936892 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-config\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.936978 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.937004 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-openvswitch\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.937011 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.937031 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.937055 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-bin\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.938006 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-env-overrides\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.939668 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/78ac9747-c331-4c4f-af69-5153d05f4097-ovn-node-metrics-cert\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.944913 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.952469 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfg9s\" (UniqueName: \"kubernetes.io/projected/78ac9747-c331-4c4f-af69-5153d05f4097-kube-api-access-qfg9s\") pod \"ovnkube-node-lrsts\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.955126 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154ed
c32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.965925 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:06 crc kubenswrapper[4943]: I1129 06:34:06.977378 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.037755 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.038020 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:09.037991589 +0000 UTC m=+23.968080342 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.038275 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.038405 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.038542 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.038754 4943 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.038814 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.038831 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.038843 4943 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.038818 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:09.038806479 +0000 UTC m=+23.968895312 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.038887 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:09.038878961 +0000 UTC m=+23.968967704 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.039168 4943 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.039340 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:09.039313512 +0000 UTC m=+23.969402335 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.096333 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.115322 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.115667 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.117012 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.125540 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-sync
er\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: W1129 06:34:07.132044 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78ac9747_c331_4c4f_af69_5153d05f4097.slice/crio-f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1 WatchSource:0}: Error finding container f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1: Status 404 returned error can't find the container with id f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1 Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.139717 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: 
\"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.139889 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.139974 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.139991 4943 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.140052 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:09.140034096 +0000 UTC m=+24.070122849 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.140815 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.156086 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.167445 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.180350 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.199401 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.213778 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.243090 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.273383 4943 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.287370 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.305713 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.327822 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.327863 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.327979 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.327973 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.328092 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:07 crc kubenswrapper[4943]: E1129 06:34:07.328141 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.334587 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.335180 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.336092 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.337649 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.338513 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.339826 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.340445 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.341240 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.342363 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.343133 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.344367 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.345203 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.346532 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.347128 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.347806 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.349041 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.349734 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" 
path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.349724 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.350858 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.351346 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.352096 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.353350 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.354009 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.355384 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.355884 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.356928 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.357427 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.358125 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.359527 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.360031 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.361052 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.361515 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
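This burst of kubelet_volumes.go messages is the kubelet's orphaned-volume sweep: for each pod UID that no longer exists, it removes the per-pod directory /var/lib/kubelet/pods/<pod-UID>/volumes once nothing is mounted beneath it. A read-only sketch that enumerates the same candidates; the root path is the stock kubelet default, assumed rather than read from this node's configuration.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// List every per-pod volumes directory under the default kubelet root.
	// The kubelet deletes these (and logs the lines above) once the owning
	// pod is gone and all of its volumes are unmounted.
	matches, err := filepath.Glob("/var/lib/kubelet/pods/*/volumes")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, dir := range matches {
		// The pod UID is the path element directly above "volumes".
		fmt.Printf("podUID=%q path=%q\n", filepath.Base(filepath.Dir(dir)), dir)
	}
}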
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.362428 4943 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.362528 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.364237 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.365440 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.365920 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.366541 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.367752 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.368502 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.369654 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.370294 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.371502 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.372039 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.373025 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.373733 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.375069 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.375542 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.376493 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.377122 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.378315 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.378909 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.379952 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.380591 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" 
path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.381554 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.382153 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.382645 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.388638 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:3
3:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.404604 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.418073 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.430476 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.441583 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.447056 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerStarted","Data":"9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69"}
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.447117 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerStarted","Data":"60547f1b47aa8cfb65efc94505e86894e6fe518bd9b6f1d0fdfb2d477cf5fc5a"}
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.448454 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerStarted","Data":"ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793"}
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.448478 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerStarted","Data":"9784208d57d8b024aa5a82dae2321cde355a05cd455f52f199410c2a02956b65"}
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.449875 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f"}
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.450009 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"ea6a3ffd6c22184e94d22585862aef262a99abf3b46089c6f4cc62cbbd10e764"}
Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.451014 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4" exitCode=0
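The SyncLoop lines serialize the kubelet's pod lifecycle event generator (PLEG) events as JSON with an ID (pod UID), a Type, and a Data payload, which for the container events above is a container ID. A small sketch that decodes one of the logged payloads into a local mirror struct; the struct illustrates the logged shape only, it is not the kubelet's own type:

package main

import (
	"encoding/json"
	"fmt"
)

// podLifecycleEvent mirrors the fields printed in the
// "SyncLoop (PLEG): event for pod" lines above.
type podLifecycleEvent struct {
	ID   string `json:"ID"`   // pod UID
	Type string `json:"Type"` // e.g. ContainerStarted, ContainerDied
	Data string `json:"Data"` // container ID for container events
}

func main() {
	// Payload copied verbatim from the multus-kh8qv ContainerStarted line above.
	raw := `{"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerStarted","Data":"9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69"}`
	var ev podLifecycleEvent
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("pod %s: %s %s\n", ev.ID, ev.Type, ev.Data)
}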
event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4"} Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.451085 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1"} Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.459028 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-ru
n-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.474830 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.486595 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.498204 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.531655 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.566651 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.610265 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.653374 4943 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.694547 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.735243 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.768159 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.808716 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.849343 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.891828 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.930041 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:07 crc kubenswrapper[4943]: I1129 06:34:07.968813 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.009069 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.051016 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.106637 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\"
:0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.218285 4943 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.219980 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.220018 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.220031 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.220129 4943 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.226920 4943 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.227204 4943 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.228253 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.228282 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.228292 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.228308 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc 
kubenswrapper[4943]: I1129 06:34:08.228321 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: E1129 06:34:08.245660 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 
2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.248993 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.249026 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.249037 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.249054 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.249066 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: E1129 06:34:08.265704 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 
2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.269368 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.269398 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.269409 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.269425 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.269438 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.281483 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-fm4js"] Nov 29 06:34:08 crc kubenswrapper[4943]: E1129 06:34:08.281421 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 
2025-08-24T17:21:41Z"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.281974 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-fm4js"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.283517 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.283753 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.285218 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.285254 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.285265 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.285280 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.285291 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.286310 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.286351 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 29 06:34:08 crc kubenswrapper[4943]: E1129 06:34:08.302402 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... duplicate node status patch payload omitted; identical to the payload in the E1129 06:34:08.281421 entry above ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.305611 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.305655 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.305665 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.305681 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.305694 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.307658 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:08 crc kubenswrapper[4943]: E1129 06:34:08.318169 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... duplicate node status patch payload omitted; identical to the payload in the E1129 06:34:08.281421 entry above ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:08 crc kubenswrapper[4943]: E1129 06:34:08.318324 4943 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.319946 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.319992 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.320005 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.320022 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.320033 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.323796 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.336518 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.354253 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3b1af38-c6d6-4943-9cb4-a0482aee684d-serviceca\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.354329 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f3b1af38-c6d6-4943-9cb4-a0482aee684d-host\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.354400 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbkpk\" (UniqueName: \"kubernetes.io/projected/f3b1af38-c6d6-4943-9cb4-a0482aee684d-kube-api-access-wbkpk\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.382913 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.417243 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.424035 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.424067 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.424077 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.424091 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.424100 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.455180 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbkpk\" (UniqueName: \"kubernetes.io/projected/f3b1af38-c6d6-4943-9cb4-a0482aee684d-kube-api-access-wbkpk\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.455218 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3b1af38-c6d6-4943-9cb4-a0482aee684d-serviceca\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.455238 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f3b1af38-c6d6-4943-9cb4-a0482aee684d-host\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.455305 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f3b1af38-c6d6-4943-9cb4-a0482aee684d-host\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.456328 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3b1af38-c6d6-4943-9cb4-a0482aee684d-serviceca\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.459696 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.462144 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.462798 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.462833 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.465001 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.466577 4943 generic.go:334] "Generic (PLEG): container finished" podID="ae851705-b905-4caa-932a-345918c2d3f7" containerID="ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793" exitCode=0 Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.467067 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerDied","Data":"ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.484727 4943 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbkpk\" (UniqueName: \"kubernetes.io/projected/f3b1af38-c6d6-4943-9cb4-a0482aee684d-kube-api-access-wbkpk\") pod \"node-ca-fm4js\" (UID: \"f3b1af38-c6d6-4943-9cb4-a0482aee684d\") " pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.504625 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.508539 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.508986 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.527353 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.528148 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.528184 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.528196 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.528213 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.528224 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.569469 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.595462 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-fm4js" Nov 29 06:34:08 crc kubenswrapper[4943]: W1129 06:34:08.605470 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3b1af38_c6d6_4943_9cb4_a0482aee684d.slice/crio-5c182ac09df7ce09c2ab6fcac12c759482b4ddd41fe23a6d5d28f6f3165e668f WatchSource:0}: Error finding container 5c182ac09df7ce09c2ab6fcac12c759482b4ddd41fe23a6d5d28f6f3165e668f: Status 404 returned error can't find the container with id 5c182ac09df7ce09c2ab6fcac12c759482b4ddd41fe23a6d5d28f6f3165e668f Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.608752 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.630844 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.631105 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.631116 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.631132 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.631142 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.646920 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.688650 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.729904 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.734290 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.734332 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.734344 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.734356 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.734365 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.771263 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.812755 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z 
is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.837083 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.837117 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.837126 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.837139 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.837147 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.849638 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.887764 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.934170 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.939807 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.939852 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.939868 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.939889 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.939906 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:08Z","lastTransitionTime":"2025-11-29T06:34:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:08 crc kubenswrapper[4943]: I1129 06:34:08.980096 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33e
c52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.011797 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.042131 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.042169 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.042213 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.042232 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.042244 4943 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.050260 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.060047 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.060160 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:13.060141407 +0000 UTC m=+27.990230170 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.060955 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.061090 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.061138 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.061165 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.061146 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.061179 4943 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.061244 4943 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.061278 4943 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.061284 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:13.061261225 +0000 UTC m=+27.991349988 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.061403 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:13.061369298 +0000 UTC m=+27.991458071 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.061428 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:13.061416699 +0000 UTC m=+27.991505462 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.089542 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.126921 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.144904 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.144941 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.144951 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.144966 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.144977 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.162618 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.162829 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.162858 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.162874 4943 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.162946 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:13.162925552 +0000 UTC m=+28.093014325 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.173514 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\
\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.210146 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc 
kubenswrapper[4943]: I1129 06:34:09.247354 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.247391 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.247403 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.247417 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.247430 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.255324 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54
b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\
\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.288880 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.327393 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.327426 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.327392 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.327541 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.327646 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:09 crc kubenswrapper[4943]: E1129 06:34:09.327717 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.328223 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.352021 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.352087 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.352099 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.352123 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.352135 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.370201 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.407741 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.454853 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.454896 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.454907 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.454924 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.454950 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.470701 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-fm4js" event={"ID":"f3b1af38-c6d6-4943-9cb4-a0482aee684d","Type":"ContainerStarted","Data":"0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761"}
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.470743 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-fm4js" event={"ID":"f3b1af38-c6d6-4943-9cb4-a0482aee684d","Type":"ContainerStarted","Data":"5c182ac09df7ce09c2ab6fcac12c759482b4ddd41fe23a6d5d28f6f3165e668f"}
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.474426 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b"}
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.474465 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8"}
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.474478 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18"}
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.474502 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198"}
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.476539 4943 generic.go:334] "Generic (PLEG): container finished" podID="ae851705-b905-4caa-932a-345918c2d3f7" containerID="6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953" exitCode=0
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.476602 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerDied","Data":"6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953"}
Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.494104 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.511420 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 
2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.529611 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.557259 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.557313 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.557323 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.557336 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.557345 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.572757 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154
edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":
\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{
\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.608929 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.647152 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.659935 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.659972 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.659982 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.659998 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.660010 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.691639 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.731837 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.761974 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.762033 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.762044 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.762061 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.762072 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.772085 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.809745 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.850724 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.863844 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.863883 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.863893 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.863908 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.863918 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.890656 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.932533 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/hos
t/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\
\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.965847 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.965890 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.965904 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.965920 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.965932 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:09Z","lastTransitionTime":"2025-11-29T06:34:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:09 crc kubenswrapper[4943]: I1129 06:34:09.977370 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.034359 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.055221 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.067858 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.067894 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.067906 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.067921 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.067932 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.090150 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.130742 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.168141 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.169338 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.169365 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.169373 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.169386 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.169396 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.207415 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.247743 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.272208 4943 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.272241 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.272253 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.272270 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.272283 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.289623 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.336303 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.368318 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.375614 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.375716 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.375737 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.375847 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.375866 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.407630 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.448463 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.479236 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.479277 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.479287 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.479305 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.479317 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.482425 4943 generic.go:334] "Generic (PLEG): container finished" podID="ae851705-b905-4caa-932a-345918c2d3f7" containerID="618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57" exitCode=0 Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.482467 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerDied","Data":"618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.488293 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.528896 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.569315 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.581436 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.581470 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.581481 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.581504 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.581518 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.609873 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.649787 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.684451 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.684500 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.684514 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.684533 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.684544 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.692120 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.731306 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.776801 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994829
19d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\
"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.788182 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.788233 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.788242 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.788259 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.788267 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.813179 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.853290 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.890650 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.890733 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.890947 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.890977 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.890998 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.893075 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.935254 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.975470 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:10Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.994666 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.994736 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.994751 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.994768 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:10 crc kubenswrapper[4943]: I1129 06:34:10.994779 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:10Z","lastTransitionTime":"2025-11-29T06:34:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.013888 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.053163 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.091303 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.096786 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.096819 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.096832 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.096848 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.096858 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.132381 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.174927 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.199446 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.199478 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.199489 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.199502 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.199511 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.206662 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.301496 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.301535 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.301544 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.301558 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.301583 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.326817 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.326837 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:34:11 crc kubenswrapper[4943]: E1129 06:34:11.326949 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.326963 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:34:11 crc kubenswrapper[4943]: E1129 06:34:11.327404 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:34:11 crc kubenswrapper[4943]: E1129 06:34:11.327491 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.404693 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.404751 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.404761 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.404777 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.404787 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.488355 4943 generic.go:334] "Generic (PLEG): container finished" podID="ae851705-b905-4caa-932a-345918c2d3f7" containerID="a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a" exitCode=0
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.488402 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerDied","Data":"a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a"}
Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.504010 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.507646 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.507682 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.507694 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.507716 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.507728 4943 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.521748 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.534026 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.545555 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.562242 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.579516 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7
346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.606390 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.610243 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.610275 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.610287 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.610302 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.610312 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.624627 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.641638 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.660387 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.677629 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.693678 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.712510 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.712545 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.712554 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.712601 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.712610 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.732958 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.777409 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.814888 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.814929 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.814938 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.814953 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.814963 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.825220 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33e
c52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:11Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.917666 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.917722 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.917738 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.917761 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:11 crc kubenswrapper[4943]: I1129 06:34:11.917777 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:11Z","lastTransitionTime":"2025-11-29T06:34:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.021279 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.021337 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.021346 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.021360 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.021370 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.124508 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.124554 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.124593 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.124610 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.124622 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.226775 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.226824 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.226844 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.226867 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.226883 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.329719 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.329756 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.329769 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.329786 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.329797 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.432818 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.432863 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.432875 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.432892 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.432905 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.535286 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.535321 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.535329 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.535350 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.535359 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.638451 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.638484 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.638494 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.638509 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.638519 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.740043 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.740076 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.740086 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.740098 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.740107 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.842556 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.842617 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.842630 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.842647 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.842658 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.944822 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.944859 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.944868 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.944884 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:12 crc kubenswrapper[4943]: I1129 06:34:12.944895 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:12Z","lastTransitionTime":"2025-11-29T06:34:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.047316 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.047583 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.047659 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.047761 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.047832 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.101598 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.101745 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:21.101723396 +0000 UTC m=+36.031812159 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.102088 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.102195 4943 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.102252 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:21.102242439 +0000 UTC m=+36.032331192 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.102345 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.102464 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.102588 4943 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.102617 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.102636 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.102648 4943 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.102684 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:21.102661099 +0000 UTC m=+36.032749882 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.102711 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:21.10269988 +0000 UTC m=+36.032788743 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.150451 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.150722 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.150815 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.150913 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.150996 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.203445 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.203703 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.203752 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.203764 4943 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.203842 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:21.203819494 +0000 UTC m=+36.133908307 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.253204 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.253254 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.253266 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.253294 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.253308 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.327447 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.327605 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.327691 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.327762 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.327839 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:34:13 crc kubenswrapper[4943]: E1129 06:34:13.327925 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.355275 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.355316 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.355325 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.355341 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.355350 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.456946 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.456980 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.456988 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.457000 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.457010 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.496731 4943 generic.go:334] "Generic (PLEG): container finished" podID="ae851705-b905-4caa-932a-345918c2d3f7" containerID="835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a" exitCode=0 Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.496799 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerDied","Data":"835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a"} Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.500599 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156"} Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.518548 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd 
nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":
\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cn
i-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.533617 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.543881 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.556104 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.559410 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.559448 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.559457 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.559471 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.559494 4943 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.567177 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.577426 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.590537 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.604374 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.617223 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.634303 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4
373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\
\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.656202 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.661647 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.661675 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.661686 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.661706 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.661717 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.667221 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.679408 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.692402 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 
2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.704421 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:13Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.764708 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.764755 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.764765 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.764779 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.764790 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.867705 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.867750 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.867758 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.867772 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.867783 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.970869 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.970918 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.970929 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.970945 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:13 crc kubenswrapper[4943]: I1129 06:34:13.970957 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:13Z","lastTransitionTime":"2025-11-29T06:34:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.073840 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.074133 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.074259 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.074359 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.074449 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.177104 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.177444 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.177546 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.177674 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.177760 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.279743 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.279782 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.279792 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.279821 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.279841 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.382850 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.382889 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.382898 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.382912 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.382921 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.484681 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.484724 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.484798 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.484819 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.484832 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.547880 4943 generic.go:334] "Generic (PLEG): container finished" podID="ae851705-b905-4caa-932a-345918c2d3f7" containerID="dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738" exitCode=0 Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.547923 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerDied","Data":"dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738"} Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.562585 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6
gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.577456 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.588677 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.588726 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.588739 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.588757 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.588768 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.596842 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.610715 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.623058 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.637740 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.650014 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.664879 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.678775 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.690978 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.691028 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.691041 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.691108 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.691125 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.695651 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.717421 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z 
is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.731910 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.746885 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.759287 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.769233 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:14Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.792977 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.793025 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.793039 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.793060 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.793071 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.895585 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.895639 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.895657 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.895680 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.895695 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.998025 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.998061 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.998069 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.998083 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:14 crc kubenswrapper[4943]: I1129 06:34:14.998092 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:14Z","lastTransitionTime":"2025-11-29T06:34:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.100815 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.100855 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.100865 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.100878 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.100890 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.203535 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.203577 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.203585 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.203598 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.203607 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.307103 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.307147 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.307160 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.307179 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.307192 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.326922 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:15 crc kubenswrapper[4943]: E1129 06:34:15.327055 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.327488 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:15 crc kubenswrapper[4943]: E1129 06:34:15.327601 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.327647 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:15 crc kubenswrapper[4943]: E1129 06:34:15.327686 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.340591 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.354093 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.365450 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.378072 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.406190 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.411220 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.411254 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.411265 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.411280 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.411289 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.435790 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.449905 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.469018 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.482108 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.493376 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.508298 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.513665 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.513702 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.513711 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.513726 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.513737 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.520832 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.533051 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.544001 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.552931 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.553186 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.554911 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.558059 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" event={"ID":"ae851705-b905-4caa-932a-345918c2d3f7","Type":"ContainerStarted","Data":"0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.565647 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" 
Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.573074 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.580419 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc47
8274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.594356 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.609309 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.615581 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.615619 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.615630 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.615644 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.615653 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.621800 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.634729 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.644723 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.658118 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.679333 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.693514 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.702713 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.714651 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.719207 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.719242 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.719251 4943 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.719267 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.719282 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.727514 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.739446 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.756460 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b
3c1d6b6c180bc87d9186c7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.766414 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.779035 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.791501 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.802230 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.813539 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.821492 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.821527 4943 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.821537 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.821551 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.821582 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.825978 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.838146 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.853254 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.870989 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-p
od-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.882547 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.892614 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.904837 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.914707 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.924403 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.924437 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.924446 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.924461 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.924471 
4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:15Z","lastTransitionTime":"2025-11-29T06:34:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.926077 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPa
th\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:15 crc kubenswrapper[4943]: I1129 06:34:15.944866 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b
3c1d6b6c180bc87d9186c7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.027009 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.027064 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.027076 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.027094 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.027107 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.051443 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.129441 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.129475 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.129483 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.129496 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.129506 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.232019 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.232056 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.232069 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.232083 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.232093 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.334725 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.334779 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.334794 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.334812 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.334824 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.438057 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.438825 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.438955 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.439041 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.439055 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.541745 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.541795 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.541808 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.541824 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.542086 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.561222 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.581627 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.595128 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.611426 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.625956 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.643813 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.643859 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.643871 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.643886 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.643896 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.645736 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.658614 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.671653 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.681183 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.694343 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\
\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.709706 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.723428 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.737515 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.746643 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.746686 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.746697 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.746715 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.746729 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network 
plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.750864 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.765068 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.785882 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-2
9T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d977
78d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.797433 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:16Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.849216 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.849263 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.849274 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.849291 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.849303 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.951299 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.951331 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.951341 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.951354 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:16 crc kubenswrapper[4943]: I1129 06:34:16.951362 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:16Z","lastTransitionTime":"2025-11-29T06:34:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.053397 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.053434 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.053442 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.053456 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.053465 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.155331 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.155375 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.155385 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.155400 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.155412 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.258312 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.258368 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.258384 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.258407 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.258424 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.326865 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:17 crc kubenswrapper[4943]: E1129 06:34:17.326997 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.326882 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:17 crc kubenswrapper[4943]: E1129 06:34:17.327070 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.326862 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:17 crc kubenswrapper[4943]: E1129 06:34:17.327121 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.361259 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.361303 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.361312 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.361328 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.361339 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.463702 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.463740 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.463749 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.463764 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.463773 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.565145 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/0.log" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.565288 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.565338 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.565350 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.565367 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.565380 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.568031 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1" exitCode=1 Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.568086 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.570247 4943 scope.go:117] "RemoveContainer" containerID="849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.584382 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0
d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.598337 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.609945 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.620843 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.633777 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.652326 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-p
od-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.661496 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.668194 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.668234 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.668246 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.668263 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.668272 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.673492 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.686782 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.699533 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.719102 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b
3c1d6b6c180bc87d9186c7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:17Z\\\",\\\"message\\\":\\\"vent handler 6 for removal\\\\nI1129 06:34:17.010407 6243 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 06:34:17.010417 6243 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 06:34:17.010747 6243 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 06:34:17.010797 6243 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011674 6243 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011739 6243 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011798 6243 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011818 6243 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011870 6243 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.012397 6243 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.732251 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.742437 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.758693 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.770237 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.770264 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.770273 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.770286 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.770294 4943 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.773596 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:17Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.872287 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.872315 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.872323 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.872335 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.872344 4943 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.989056 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.989109 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.989124 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.989145 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:17 crc kubenswrapper[4943]: I1129 06:34:17.989159 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:17Z","lastTransitionTime":"2025-11-29T06:34:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.091539 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.091584 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.091614 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.091631 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.091643 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.193652 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.193691 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.193699 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.193714 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.193724 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.295663 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.295701 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.295710 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.295725 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.295735 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.398410 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.398452 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.398462 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.398476 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.398485 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.460143 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.460188 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.460196 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.460210 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.460219 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: E1129 06:34:18.472128 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.476455 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.476504 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.476512 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.476527 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.476537 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: E1129 06:34:18.493695 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.498418 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.498461 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.498470 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.498485 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.498495 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: E1129 06:34:18.510840 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.514383 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.514422 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.514432 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.514447 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.514462 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: E1129 06:34:18.525887 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.529524 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.529558 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.529570 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.529601 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.529614 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: E1129 06:34:18.541058 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: E1129 06:34:18.541224 4943 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.542404 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.542438 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.542449 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.542464 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.542474 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.573362 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/0.log" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.576118 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.576395 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.597780 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b6
8c902a99965600d911c51b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:17Z\\\",\\\"message\\\":\\\"vent handler 6 for removal\\\\nI1129 06:34:17.010407 6243 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 06:34:17.010417 6243 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 06:34:17.010747 6243 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 06:34:17.010797 6243 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011674 6243 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011739 6243 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011798 6243 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011818 6243 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011870 6243 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.012397 6243 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.612998 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.625785 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.637414 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.645381 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.645452 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.645466 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.645482 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.645494 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.647816 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.659102 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.669850 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.682422 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.701402 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.705948 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp"] Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.706334 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.707742 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.708397 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.712712 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controlle
r-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.725500 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.735709 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.747294 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.747331 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.747342 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.747358 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.747368 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.748330 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.761213 4943 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.773828 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.784340 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.796505 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eeaa4eb3-534c-4088-bd56-6beb70d968e7-env-overrides\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.796556 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eeaa4eb3-534c-4088-bd56-6beb70d968e7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.796599 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eeaa4eb3-534c-4088-bd56-6beb70d968e7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.796620 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc4xk\" (UniqueName: \"kubernetes.io/projected/eeaa4eb3-534c-4088-bd56-6beb70d968e7-kube-api-access-fc4xk\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.797456 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.808954 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.824001 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.836294 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.848358 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.852615 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.852651 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.852660 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.852680 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.852690 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.864886 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.877066 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.891084 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.897350 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eeaa4eb3-534c-4088-bd56-6beb70d968e7-env-overrides\") pod 
\"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.897398 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eeaa4eb3-534c-4088-bd56-6beb70d968e7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.897416 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eeaa4eb3-534c-4088-bd56-6beb70d968e7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.897432 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc4xk\" (UniqueName: \"kubernetes.io/projected/eeaa4eb3-534c-4088-bd56-6beb70d968e7-kube-api-access-fc4xk\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.898089 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eeaa4eb3-534c-4088-bd56-6beb70d968e7-env-overrides\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.898100 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eeaa4eb3-534c-4088-bd56-6beb70d968e7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.904991 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eeaa4eb3-534c-4088-bd56-6beb70d968e7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.908540 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.913077 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc4xk\" (UniqueName: \"kubernetes.io/projected/eeaa4eb3-534c-4088-bd56-6beb70d968e7-kube-api-access-fc4xk\") pod \"ovnkube-control-plane-749d76644c-svhjp\" (UID: \"eeaa4eb3-534c-4088-bd56-6beb70d968e7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.920663 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.930168 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.939887 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.950876 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.956628 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.956666 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.956675 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.956689 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.956698 
4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:18Z","lastTransitionTime":"2025-11-29T06:34:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.962647 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPa
th\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:18 crc kubenswrapper[4943]: I1129 06:34:18.978376 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b6
8c902a99965600d911c51b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:17Z\\\",\\\"message\\\":\\\"vent handler 6 for removal\\\\nI1129 06:34:17.010407 6243 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 06:34:17.010417 6243 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 06:34:17.010747 6243 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 06:34:17.010797 6243 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011674 6243 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011739 6243 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011798 6243 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011818 6243 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011870 6243 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.012397 6243 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:18Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.017861 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" Nov 29 06:34:19 crc kubenswrapper[4943]: W1129 06:34:19.029108 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeeaa4eb3_534c_4088_bd56_6beb70d968e7.slice/crio-b714b239b0b2a899d99438b83df54b614bcfabdd4bdb26049bfa53c577a56343 WatchSource:0}: Error finding container b714b239b0b2a899d99438b83df54b614bcfabdd4bdb26049bfa53c577a56343: Status 404 returned error can't find the container with id b714b239b0b2a899d99438b83df54b614bcfabdd4bdb26049bfa53c577a56343 Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.060096 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.060140 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.060149 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.060165 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.060174 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.162191 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.162228 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.162240 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.162255 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.162266 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.264540 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.264618 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.264628 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.264642 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.264652 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.327425 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.327472 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:19 crc kubenswrapper[4943]: E1129 06:34:19.327542 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.327472 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:19 crc kubenswrapper[4943]: E1129 06:34:19.327616 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:19 crc kubenswrapper[4943]: E1129 06:34:19.327675 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.366348 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.366385 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.366396 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.366411 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.366420 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.469049 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.469081 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.469089 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.469102 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.469111 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.571303 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.571366 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.571383 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.571406 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.571423 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.583037 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/1.log" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.584236 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/0.log" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.588848 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e" exitCode=1 Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.588939 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.588985 4943 scope.go:117] "RemoveContainer" containerID="849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.590406 4943 scope.go:117] "RemoveContainer" containerID="7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e" Nov 29 06:34:19 crc kubenswrapper[4943]: E1129 06:34:19.590803 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.591256 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" event={"ID":"eeaa4eb3-534c-4088-bd56-6beb70d968e7","Type":"ContainerStarted","Data":"b714b239b0b2a899d99438b83df54b614bcfabdd4bdb26049bfa53c577a56343"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.624668 4943 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:17Z\\\",\\\"message\\\":\\\"vent handler 6 for removal\\\\nI1129 06:34:17.010407 6243 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 06:34:17.010417 6243 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 06:34:17.010747 6243 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 06:34:17.010797 6243 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011674 6243 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011739 6243 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011798 6243 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011818 6243 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011870 6243 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.012397 6243 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"ncerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1129 06:34:18.341797 6382 
lb_config.go:1031] Cluster endpoints for openshift-machine-api/control-plane-machine-set-operator for network=default are: map[]\\\\nI1129 06:34:18.341335 6382 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 06:34:18.341879 6382 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift
-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.645956 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.666857 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.674302 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.674370 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.674392 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.674420 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.674444 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.683623 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.696296 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.715890 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.729849 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.742679 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.755975 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.766893 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.777172 4943 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.777240 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.777257 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.777702 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.777770 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.780410 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.790846 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-4wgtt"] Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.791270 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:19 crc kubenswrapper[4943]: E1129 06:34:19.791335 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.799230 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.809868 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.820856 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.832469 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.843216 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.863404 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b6
8c902a99965600d911c51b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:17Z\\\",\\\"message\\\":\\\"vent handler 6 for removal\\\\nI1129 06:34:17.010407 6243 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 06:34:17.010417 6243 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 06:34:17.010747 6243 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 06:34:17.010797 6243 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011674 6243 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011739 6243 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011798 6243 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011818 6243 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011870 6243 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.012397 6243 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"ncerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1129 06:34:18.341797 6382 lb_config.go:1031] Cluster endpoints for openshift-machine-api/control-plane-machine-set-operator for network=default are: map[]\\\\nI1129 06:34:18.341335 6382 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 06:34:18.341879 6382 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service 
k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.873526 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.880257 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.880288 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.880297 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.880310 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.880322 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.886219 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.896359 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.906231 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.911217 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92kj4\" (UniqueName: \"kubernetes.io/projected/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-kube-api-access-92kj4\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.911354 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.917336 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.927391 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.944132 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.955974 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.966275 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.978821 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.981939 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.981964 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.981973 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.981986 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.981994 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:19Z","lastTransitionTime":"2025-11-29T06:34:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:19 crc kubenswrapper[4943]: I1129 06:34:19.988908 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:19Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.006411 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 
2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.012441 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92kj4\" (UniqueName: \"kubernetes.io/projected/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-kube-api-access-92kj4\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.012482 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:20 crc kubenswrapper[4943]: E1129 06:34:20.012595 4943 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:20 crc kubenswrapper[4943]: E1129 06:34:20.012650 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs podName:b7b0785d-0c62-4fef-83aa-a9d32e9d388b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:20.512635831 +0000 UTC m=+35.442724584 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs") pod "network-metrics-daemon-4wgtt" (UID: "b7b0785d-0c62-4fef-83aa-a9d32e9d388b") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.017069 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.026676 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92kj4\" (UniqueName: \"kubernetes.io/projected/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-kube-api-access-92kj4\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.028141 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.038765 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.049783 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.085548 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.085614 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.085628 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.085644 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.085654 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.187955 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.188010 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.188021 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.188037 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.188049 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.290097 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.290133 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.290143 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.290155 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.290164 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.392212 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.392245 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.392253 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.392266 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.392275 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.494340 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.494367 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.494375 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.494387 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.494396 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.516644 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:20 crc kubenswrapper[4943]: E1129 06:34:20.516764 4943 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:20 crc kubenswrapper[4943]: E1129 06:34:20.516821 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs podName:b7b0785d-0c62-4fef-83aa-a9d32e9d388b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:21.516806114 +0000 UTC m=+36.446894867 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs") pod "network-metrics-daemon-4wgtt" (UID: "b7b0785d-0c62-4fef-83aa-a9d32e9d388b") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.584043 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.596318 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.596333 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" event={"ID":"eeaa4eb3-534c-4088-bd56-6beb70d968e7","Type":"ContainerStarted","Data":"0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.596366 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.596377 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" event={"ID":"eeaa4eb3-534c-4088-bd56-6beb70d968e7","Type":"ContainerStarted","Data":"a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.596390 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.596433 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.596458 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.598312 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/1.log" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.598513 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.601532 4943 scope.go:117] "RemoveContainer" containerID="7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e" Nov 29 06:34:20 crc kubenswrapper[4943]: E1129 06:34:20.601705 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" 
podUID="78ac9747-c331-4c4f-af69-5153d05f4097" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.609890 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.622627 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.635937 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.648755 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.659710 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.672974 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.683123 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.698643 4943 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.698681 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.698691 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.698705 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.698715 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.699484 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.717327 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-
11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.732875 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.743497 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.754815 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.767655 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.782389 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.801691 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.801724 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.801767 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.801781 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.801791 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.813005 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b6
8c902a99965600d911c51b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://849976d278f58e8c60cc3104ceb519948b158d1b3c1d6b6c180bc87d9186c7d1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:17Z\\\",\\\"message\\\":\\\"vent handler 6 for removal\\\\nI1129 06:34:17.010407 6243 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 06:34:17.010417 6243 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 06:34:17.010747 6243 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 06:34:17.010797 6243 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011674 6243 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011739 6243 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011798 6243 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 06:34:17.011818 6243 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.011870 6243 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 06:34:17.012397 6243 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"ncerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1129 06:34:18.341797 6382 lb_config.go:1031] Cluster endpoints for openshift-machine-api/control-plane-machine-set-operator for network=default are: map[]\\\\nI1129 06:34:18.341335 6382 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 06:34:18.341879 6382 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service 
k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.830803 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.847034 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.860902 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 
06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.874539 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.886022 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.896973 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.903463 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.903491 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.903499 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.903511 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.903519 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:20Z","lastTransitionTime":"2025-11-29T06:34:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.907750 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.918118 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.926793 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.937812 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"19
2.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"202
5-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0
c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.956004 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b
4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.972524 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.982286 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:20 crc kubenswrapper[4943]: I1129 06:34:20.993653 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:20Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.005154 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:21Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.005465 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.005512 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.005524 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.005541 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.005553 
4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.019742 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPa
th\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:21Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.036878 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b6
8c902a99965600d911c51b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"ncerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1129 06:34:18.341797 6382 lb_config.go:1031] Cluster endpoints for openshift-machine-api/control-plane-machine-set-operator for network=default are: map[]\\\\nI1129 06:34:18.341335 6382 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 06:34:18.341879 6382 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:21Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.046645 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:21Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.108927 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.108968 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.108979 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.108995 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.109006 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.121348 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.121475 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121497 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:37.121467215 +0000 UTC m=+52.051556008 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121561 4943 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.121630 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121708 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:37.121629919 +0000 UTC m=+52.051718792 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.121747 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121803 4943 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121847 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:37.121838335 +0000 UTC m=+52.051927078 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121891 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121919 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121930 4943 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.121958 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:37.121949548 +0000 UTC m=+52.052038301 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.211473 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.211501 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.211509 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.211521 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.211529 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.223124 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.223260 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.223286 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.223297 4943 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.223350 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 06:34:37.223334227 +0000 UTC m=+52.153422980 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.313662 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.313749 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.313762 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.313790 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.313800 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.327280 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.327322 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.327280 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.327558 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.327407 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.327624 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.327693 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.327759 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.416528 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.416565 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.416574 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.416599 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.416607 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.519226 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.519320 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.519347 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.519377 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.519397 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.526028 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.526232 4943 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: E1129 06:34:21.526355 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs podName:b7b0785d-0c62-4fef-83aa-a9d32e9d388b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:23.526326079 +0000 UTC m=+38.456414862 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs") pod "network-metrics-daemon-4wgtt" (UID: "b7b0785d-0c62-4fef-83aa-a9d32e9d388b") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.621694 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.621733 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.621769 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.621786 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.621800 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.724423 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.724471 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.724495 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.724516 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.724531 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.826920 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.826957 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.826968 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.826984 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.826996 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.929308 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.929365 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.929378 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.929398 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:21 crc kubenswrapper[4943]: I1129 06:34:21.929411 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:21Z","lastTransitionTime":"2025-11-29T06:34:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.032184 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.032225 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.032235 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.032250 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.032261 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.135232 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.135281 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.135291 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.135305 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.135313 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.237838 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.238124 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.238133 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.238145 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.238154 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.340673 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.340714 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.340725 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.340739 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.340750 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.442830 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.442870 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.442884 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.442901 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.442915 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.550002 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.550037 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.550050 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.550068 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.550161 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.652884 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.653484 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.653644 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.653739 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.653812 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.757169 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.757375 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.757469 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.757545 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.757744 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.859614 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.859671 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.859682 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.859695 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.859705 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.962408 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.962455 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.962471 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.962489 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:22 crc kubenswrapper[4943]: I1129 06:34:22.962500 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:22Z","lastTransitionTime":"2025-11-29T06:34:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.064712 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.064765 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.064780 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.064799 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.064811 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.167166 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.167202 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.167210 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.167224 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.167233 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.269223 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.269255 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.269266 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.269281 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.269292 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.327373 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:23 crc kubenswrapper[4943]: E1129 06:34:23.327557 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.327848 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.327904 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.328006 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:23 crc kubenswrapper[4943]: E1129 06:34:23.328013 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:23 crc kubenswrapper[4943]: E1129 06:34:23.328169 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:23 crc kubenswrapper[4943]: E1129 06:34:23.328283 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.372811 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.373051 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.373309 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.373375 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.373440 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.475484 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.475529 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.475539 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.475553 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.475586 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.541913 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:23 crc kubenswrapper[4943]: E1129 06:34:23.542055 4943 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:23 crc kubenswrapper[4943]: E1129 06:34:23.542108 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs podName:b7b0785d-0c62-4fef-83aa-a9d32e9d388b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:27.542093488 +0000 UTC m=+42.472182241 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs") pod "network-metrics-daemon-4wgtt" (UID: "b7b0785d-0c62-4fef-83aa-a9d32e9d388b") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.577432 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.577734 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.577875 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.577989 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.578091 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.680431 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.680468 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.680476 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.680490 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.680499 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.783011 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.783045 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.783054 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.783069 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.783077 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.885218 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.885253 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.885261 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.885275 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.885293 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.987434 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.987485 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.987494 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.987509 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:23 crc kubenswrapper[4943]: I1129 06:34:23.987519 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:23Z","lastTransitionTime":"2025-11-29T06:34:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.090147 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.090190 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.090202 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.090219 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.090232 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.191964 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.191999 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.192007 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.192020 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.192029 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.293717 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.293760 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.293772 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.293788 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.293799 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.395840 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.395876 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.395889 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.395905 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.395916 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.498839 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.498880 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.498889 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.498904 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.498916 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.601632 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.601667 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.601676 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.601690 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.601699 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.703806 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.703848 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.703860 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.703876 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.703887 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.806326 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.806362 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.806413 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.806430 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.806441 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.909558 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.909629 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.909645 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.909661 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:24 crc kubenswrapper[4943]: I1129 06:34:24.909675 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:24Z","lastTransitionTime":"2025-11-29T06:34:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.011935 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.011974 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.011982 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.012000 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.012014 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.114102 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.114142 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.114153 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.114168 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.114180 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.216339 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.216374 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.216386 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.216400 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.216412 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.318267 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.318308 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.318319 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.318333 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.318343 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.326683 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.326706 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.326737 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:25 crc kubenswrapper[4943]: E1129 06:34:25.326791 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
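Earlier in this excerpt, the MountVolume.SetUp failure for metrics-certs was parked with "No retries permitted until ... (durationBeforeRetry 4s)": the volume manager backs off exponentially rather than retrying a failing mount on every sync loop. The sketch below models that capped doubling backoff under stated assumptions; the kubelet's real nestedpendingoperations logic is more involved, and the type names, constants, and loop here are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"time"
)

// Assumed backoff bounds; the real defaults may differ.
const (
	initialBackoff = 500 * time.Millisecond
	maxBackoff     = 2 * time.Minute
)

type retryState struct {
	lastError time.Time
	backoff   time.Duration
}

// recordError doubles the wait after each failure, up to a cap.
func (r *retryState) recordError(now time.Time) {
	if r.backoff == 0 {
		r.backoff = initialBackoff
	} else if r.backoff < maxBackoff {
		r.backoff *= 2
		if r.backoff > maxBackoff {
			r.backoff = maxBackoff
		}
	}
	r.lastError = now
}

// retryAllowed refuses the operation while the backoff window is open,
// phrased like the log message above.
func (r *retryState) retryAllowed(now time.Time) error {
	next := r.lastError.Add(r.backoff)
	if now.Before(next) {
		return fmt.Errorf("no retries permitted until %s (durationBeforeRetry %s)",
			next.Format(time.RFC3339), r.backoff)
	}
	return nil
}

func main() {
	var st retryState
	now := time.Now()
	// Stand-in for the failing secret mount seen in the log.
	mount := func() error {
		return errors.New(`object "openshift-multus"/"metrics-daemon-secret" not registered`)
	}
	for i := 0; i < 4; i++ {
		if err := st.retryAllowed(now); err != nil {
			fmt.Println(err)
			now = st.lastError.Add(st.backoff) // pretend we waited out the window
		}
		if err := mount(); err != nil {
			st.recordError(now)
			fmt.Printf("MountVolume.SetUp failed: %v; next retry in %s\n", err, st.backoff)
		}
	}
}

The effect visible in the log is exactly this pattern: each failed SetUp pushes the next permitted attempt further out, which is why the 4s window appears instead of a tight retry loop.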
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.326805 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:25 crc kubenswrapper[4943]: E1129 06:34:25.326893 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:25 crc kubenswrapper[4943]: E1129 06:34:25.326942 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:25 crc kubenswrapper[4943]: E1129 06:34:25.327012 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.338496 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.347687 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.361272 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.372324 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.384093 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.396132 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.408142 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.419120 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.420637 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.420706 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.420717 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.420732 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.420761 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network 
plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.429599 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.442086 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.460096 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-2
9T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d977
78d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.469103 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.479848 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.489856 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 
2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.500055 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.516207 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"ncerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1129 06:34:18.341797 6382 lb_config.go:1031] Cluster endpoints for openshift-machine-api/control-plane-machine-set-operator for network=default are: map[]\\\\nI1129 06:34:18.341335 6382 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 06:34:18.341879 6382 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.522389 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.522427 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.522439 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.522456 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.522466 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.526670 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:25Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.624133 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.624193 4943 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.624204 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.624218 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.624248 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.726345 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.726377 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.726385 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.726398 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.726407 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.828468 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.828551 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.828624 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.828654 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.828675 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.932774 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.932823 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.932835 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.932855 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:25 crc kubenswrapper[4943]: I1129 06:34:25.932869 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:25Z","lastTransitionTime":"2025-11-29T06:34:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.035293 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.035341 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.035350 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.035388 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.035398 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.138105 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.138157 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.138170 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.138188 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.138200 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.240273 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.240317 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.240328 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.240344 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.240355 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.345806 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.345848 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.345858 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.345873 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.345883 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.448083 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.448127 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.448139 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.448156 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.448168 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.549893 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.549938 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.549957 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.549980 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.549995 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.652801 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.652846 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.652857 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.652873 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.652885 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.755858 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.755895 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.755908 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.755925 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.755937 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.858680 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.858719 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.858731 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.858747 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.858759 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.961547 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.961622 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.961635 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.961653 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:26 crc kubenswrapper[4943]: I1129 06:34:26.961668 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:26Z","lastTransitionTime":"2025-11-29T06:34:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.064425 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.064473 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.064482 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.064494 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.064502 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.166962 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.166997 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.167005 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.167020 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.167029 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.269746 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.269797 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.269808 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.269826 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.269838 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.327150 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.327215 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.327169 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.327168 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:27 crc kubenswrapper[4943]: E1129 06:34:27.327297 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:27 crc kubenswrapper[4943]: E1129 06:34:27.327425 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:27 crc kubenswrapper[4943]: E1129 06:34:27.327698 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:27 crc kubenswrapper[4943]: E1129 06:34:27.327734 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.372729 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.372769 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.372803 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.372819 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.372830 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.475336 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.475394 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.475402 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.475414 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.475424 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.577988 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.578030 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.578039 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.578053 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.578064 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.583529 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:27 crc kubenswrapper[4943]: E1129 06:34:27.583734 4943 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:27 crc kubenswrapper[4943]: E1129 06:34:27.583853 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs podName:b7b0785d-0c62-4fef-83aa-a9d32e9d388b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:35.583829242 +0000 UTC m=+50.513918075 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs") pod "network-metrics-daemon-4wgtt" (UID: "b7b0785d-0c62-4fef-83aa-a9d32e9d388b") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.679854 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.679888 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.679896 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.679909 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.679918 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.782802 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.782847 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.782856 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.782871 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.782881 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.885740 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.885781 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.885793 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.885807 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.885816 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.987986 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.988051 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.988063 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.988080 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:27 crc kubenswrapper[4943]: I1129 06:34:27.988090 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:27Z","lastTransitionTime":"2025-11-29T06:34:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.091549 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.091632 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.091651 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.091674 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.091695 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.194407 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.194459 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.194469 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.194490 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.194504 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.296554 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.296631 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.296641 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.296657 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.296667 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.398667 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.398714 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.398724 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.398738 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.398749 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.501481 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.501516 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.501527 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.501541 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.501551 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.605399 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.605821 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.605891 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.605968 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.606050 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.708596 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.708642 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.708654 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.708669 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.708679 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.717844 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.717874 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.717883 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.717896 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.717904 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: E1129 06:34:28.728733 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:28Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.731899 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.731930 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.731938 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.731950 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.731960 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: E1129 06:34:28.742705 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:28Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.746235 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.746287 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.746297 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.746315 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.746327 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: E1129 06:34:28.758266 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:28Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.762841 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.763491 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.763527 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.763546 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.763556 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: E1129 06:34:28.775633 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:28Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.779142 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.779178 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.779188 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.779200 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.779211 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: E1129 06:34:28.790375 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:28Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:28 crc kubenswrapper[4943]: E1129 06:34:28.790501 4943 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.811353 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.811398 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.811408 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.811422 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.811432 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.913892 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.913975 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.914010 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.914028 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:28 crc kubenswrapper[4943]: I1129 06:34:28.914040 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:28Z","lastTransitionTime":"2025-11-29T06:34:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.017071 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.017135 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.017146 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.017165 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.017178 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.119648 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.119695 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.119711 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.119728 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.119739 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.222674 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.222759 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.222771 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.222795 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.222810 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.325759 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.325825 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.325834 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.325848 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.325858 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.326451 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.326500 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:29 crc kubenswrapper[4943]: E1129 06:34:29.326531 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.326593 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.326652 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:29 crc kubenswrapper[4943]: E1129 06:34:29.326786 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:29 crc kubenswrapper[4943]: E1129 06:34:29.326849 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:29 crc kubenswrapper[4943]: E1129 06:34:29.326948 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.429410 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.429522 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.429543 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.429624 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.429658 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.533730 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.533782 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.533793 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.533812 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.533826 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.636309 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.636358 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.636370 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.636389 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.636399 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.739696 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.739767 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.739787 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.739816 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.739838 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.843896 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.844066 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.844082 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.844105 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.844146 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.946831 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.946869 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.946879 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.946900 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:29 crc kubenswrapper[4943]: I1129 06:34:29.946912 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:29Z","lastTransitionTime":"2025-11-29T06:34:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.050077 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.050671 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.050814 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.050846 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.050865 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.154142 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.154180 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.154188 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.154203 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.154212 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.256679 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.256742 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.256752 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.256765 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.256776 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.359819 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.359891 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.359908 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.359938 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.359958 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.463123 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.463170 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.463182 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.463200 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.463212 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.566056 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.566118 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.566135 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.566161 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.566183 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.669046 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.669080 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.669088 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.669101 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.669110 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.771256 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.771712 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.771915 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.772228 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.772509 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.875156 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.875676 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.875761 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.875875 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.875964 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.978911 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.978953 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.978965 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.978983 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:30 crc kubenswrapper[4943]: I1129 06:34:30.978993 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:30Z","lastTransitionTime":"2025-11-29T06:34:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.082065 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.082205 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.082246 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.082265 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.082279 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.185293 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.185362 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.185376 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.185400 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.185413 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.288331 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.288388 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.288401 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.288428 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.288449 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.326577 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.326674 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.326560 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.326596 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:31 crc kubenswrapper[4943]: E1129 06:34:31.326773 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:31 crc kubenswrapper[4943]: E1129 06:34:31.326900 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:31 crc kubenswrapper[4943]: E1129 06:34:31.327018 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:31 crc kubenswrapper[4943]: E1129 06:34:31.327158 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.393672 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.393738 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.393746 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.393762 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.393775 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.497255 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.497300 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.497310 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.497330 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.497341 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.601335 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.601438 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.601472 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.601507 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.601533 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.704329 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.704444 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.704463 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.704496 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.704519 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.807502 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.807553 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.807584 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.807607 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.807628 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.910028 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.910083 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.910094 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.910106 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:31 crc kubenswrapper[4943]: I1129 06:34:31.910139 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:31Z","lastTransitionTime":"2025-11-29T06:34:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.012248 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.012284 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.012291 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.012305 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.012314 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.114925 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.114993 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.115007 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.115023 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.115036 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.217904 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.217945 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.217957 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.217973 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.217986 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.320191 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.320231 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.320241 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.320257 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.320267 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.422994 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.423085 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.423102 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.423122 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.423134 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.525915 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.525994 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.526005 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.526018 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.526029 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.631945 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.631980 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.631988 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.632001 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.632010 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.734581 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.734632 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.734644 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.734660 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.734672 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.837277 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.837313 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.837323 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.837339 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.837352 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.940534 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.940608 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.940628 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.940649 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:32 crc kubenswrapper[4943]: I1129 06:34:32.940661 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:32Z","lastTransitionTime":"2025-11-29T06:34:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.042903 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.042957 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.042968 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.042982 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.042993 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:33Z","lastTransitionTime":"2025-11-29T06:34:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.146032 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.146074 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.146082 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.146097 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.146109 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:33Z","lastTransitionTime":"2025-11-29T06:34:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.249185 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.249244 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.249260 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.249281 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.249293 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:33Z","lastTransitionTime":"2025-11-29T06:34:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.326877 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.327027 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:34:33 crc kubenswrapper[4943]: E1129 06:34:33.327088 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.327129 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.327144 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:34:33 crc kubenswrapper[4943]: E1129 06:34:33.327276 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:34:33 crc kubenswrapper[4943]: E1129 06:34:33.327754 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:34:33 crc kubenswrapper[4943]: E1129 06:34:33.327894 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:34:33 crc kubenswrapper[4943]: I1129 06:34:33.328336 4943 scope.go:117] "RemoveContainer" containerID="7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e"
[... the same five-record node-status block repeats every ~100 ms, verbatim except for timestamps, from 06:34:33.352 through 06:34:34.485 (12 repetitions omitted) ...]
Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.588124 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.588153 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.588164 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.588176 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.588186 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:34Z","lastTransitionTime":"2025-11-29T06:34:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.643658 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/1.log" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.647018 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2"} Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.647445 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.662043 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.676428 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.689029 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.690729 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.690767 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.690777 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.690792 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.690801 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:34Z","lastTransitionTime":"2025-11-29T06:34:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.711512 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"ncerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1129 06:34:18.341797 6382 lb_config.go:1031] Cluster endpoints for openshift-machine-api/control-plane-machine-set-operator for network=default are: map[]\\\\nI1129 06:34:18.341335 6382 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 06:34:18.341879 6382 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} 
protocol:{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[
{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.722754 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.733381 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.751151 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 
06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.768289 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.783005 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.794898 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.795103 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.795184 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.795289 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.795480 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:34Z","lastTransitionTime":"2025-11-29T06:34:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.797785 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.812790 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.827604 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.840504 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.858189 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.883716 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-p
od-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.898478 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.898535 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.898547 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.898586 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.898601 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:34Z","lastTransitionTime":"2025-11-29T06:34:34Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.901424 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\
\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:34 crc kubenswrapper[4943]: I1129 06:34:34.916325 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:34Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.002245 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.002313 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.002326 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.002346 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.002361 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.105221 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.105258 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.105267 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.105280 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.105303 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.208685 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.208735 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.208745 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.208761 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.208770 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.313103 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.313183 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.313203 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.313236 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.313257 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.326916 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.326930 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:35 crc kubenswrapper[4943]: E1129 06:34:35.327355 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:35 crc kubenswrapper[4943]: E1129 06:34:35.327480 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.326994 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.326931 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:35 crc kubenswrapper[4943]: E1129 06:34:35.327591 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:35 crc kubenswrapper[4943]: E1129 06:34:35.327791 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.345777 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.363024 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.376557 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.405482 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d
7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"ncerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1129 06:34:18.341797 6382 lb_config.go:1031] Cluster endpoints for openshift-machine-api/control-plane-machine-set-operator for network=default are: map[]\\\\nI1129 06:34:18.341335 6382 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 06:34:18.341879 6382 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} 
protocol:{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[
{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.415924 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.415967 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.415980 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.416001 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.416015 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.423836 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.443245 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.461253 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.479151 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.492383 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.503797 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.518484 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.518524 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.518533 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.518548 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.518559 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.528764 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970
731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.551633 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.565921 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.585491 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.599081 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.617794 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:35 crc kubenswrapper[4943]: E1129 06:34:35.618086 4943 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:35 crc kubenswrapper[4943]: E1129 06:34:35.618241 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs podName:b7b0785d-0c62-4fef-83aa-a9d32e9d388b nodeName:}" failed. No retries permitted until 2025-11-29 06:34:51.618192832 +0000 UTC m=+66.548281625 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs") pod "network-metrics-daemon-4wgtt" (UID: "b7b0785d-0c62-4fef-83aa-a9d32e9d388b") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.620798 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.620822 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.620832 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.620845 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.620854 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.623052 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.636117 4943 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.652318 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/2.log" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.653154 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/1.log" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.655720 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2" exitCode=1 Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.655772 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" 
event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.655829 4943 scope.go:117] "RemoveContainer" containerID="7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.656311 4943 scope.go:117] "RemoveContainer" containerID="4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2" Nov 29 06:34:35 crc kubenswrapper[4943]: E1129 06:34:35.656446 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.671504 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.688803 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.705882 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.719042 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.722699 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.722763 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.722778 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.722801 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.722814 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.732538 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.745382 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.757640 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.770399 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.789374 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.814407 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-p
od-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.827407 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.827496 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.827521 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.827553 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.827606 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.828484 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\
\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.838129 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.851379 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.863959 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.878480 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.897649 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0fa
f7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fbe2c072f758357257a52ffcac4f1e5ec9025b68c902a99965600d911c51b2e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"message\\\":\\\"ncerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1129 06:34:18.341797 6382 lb_config.go:1031] Cluster endpoints for openshift-machine-api/control-plane-machine-set-operator for network=default are: map[]\\\\nI1129 06:34:18.341335 6382 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 06:34:18.341879 6382 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} 
protocol:{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.907300 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:35Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.930400 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.930448 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.930457 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.930473 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:35 crc kubenswrapper[4943]: I1129 06:34:35.930483 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:35Z","lastTransitionTime":"2025-11-29T06:34:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.034199 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.034289 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.034311 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.034340 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.034364 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.137273 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.137315 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.137324 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.137338 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.137347 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.240992 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.241076 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.241117 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.241149 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.241166 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.343545 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.343606 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.343615 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.343632 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.343642 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.446361 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.446686 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.446754 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.446848 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.446917 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.550023 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.550342 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.550480 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.550714 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.550870 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.653294 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.653359 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.653371 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.653387 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.653399 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.660497 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/2.log" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.664529 4943 scope.go:117] "RemoveContainer" containerID="4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2" Nov 29 06:34:36 crc kubenswrapper[4943]: E1129 06:34:36.664691 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.685825 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.703298 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.716491 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.727427 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.741189 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.756256 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.756406 4943 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.756418 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.756477 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.756491 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.761115 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-
11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.775316 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.788472 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.801075 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 
2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.813004 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.838718 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.850042 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.859019 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.859079 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.859090 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.859107 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.859118 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.864376 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.875921 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.885998 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 
06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.901879 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.913611 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:36Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.961687 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.961738 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.961748 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.961760 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:36 crc kubenswrapper[4943]: I1129 06:34:36.961768 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:36Z","lastTransitionTime":"2025-11-29T06:34:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.064634 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.064706 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.064716 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.064730 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.064740 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.135317 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.135442 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.135491 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135605 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:35:09.135526932 +0000 UTC m=+84.065615685 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135669 4943 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.135685 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135752 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:35:09.135729547 +0000 UTC m=+84.065818350 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135678 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135806 4943 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135822 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135857 4943 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135878 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:35:09.135867671 +0000 UTC m=+84.065956514 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.135906 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 06:35:09.135891541 +0000 UTC m=+84.065980314 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.167310 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.167346 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.167355 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.167370 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.167380 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.236956 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.237096 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.237193 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.237204 4943 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.237254 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 06:35:09.2372406 +0000 UTC m=+84.167329353 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.270091 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.270131 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.270141 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.270158 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.270171 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.326713 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.326887 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.326936 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.326954 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.327124 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.327227 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.326952 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:37 crc kubenswrapper[4943]: E1129 06:34:37.327385 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.372900 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.372973 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.372995 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.373025 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.373050 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.476557 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.476782 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.476819 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.476849 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.476871 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.579068 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.579119 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.579133 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.579155 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.579169 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.681915 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.681977 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.681987 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.682004 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.682015 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.784993 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.785047 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.785064 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.785080 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.785090 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.887006 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.887058 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.887073 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.887091 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.887104 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.989424 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.989476 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.989490 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.989509 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:37 crc kubenswrapper[4943]: I1129 06:34:37.989524 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:37Z","lastTransitionTime":"2025-11-29T06:34:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.092064 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.092107 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.092117 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.092132 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.092142 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.195153 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.195192 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.195204 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.195217 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.195226 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.297589 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.297885 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.297957 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.298023 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.298081 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.400133 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.400185 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.400194 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.400207 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.400217 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.503439 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.503515 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.503534 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.503631 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.503651 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.605802 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.605829 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.605837 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.605849 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.605857 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.708936 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.708993 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.709013 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.709037 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.709051 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.812281 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.812350 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.812369 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.812402 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.812423 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.878853 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.878950 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.878967 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.879007 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.879044 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:38 crc kubenswrapper[4943]: E1129 06:34:38.899827 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:38Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.905902 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.905965 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.905988 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.906021 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.906044 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:38 crc kubenswrapper[4943]: E1129 06:34:38.926918 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:38Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.931260 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.931308 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.931324 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.931349 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.931364 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:38 crc kubenswrapper[4943]: E1129 06:34:38.949442 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:38Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.954292 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.954365 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.954384 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.954409 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.954428 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:38 crc kubenswrapper[4943]: E1129 06:34:38.971104 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:38Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.976867 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.977014 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.977143 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.977254 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:38 crc kubenswrapper[4943]: I1129 06:34:38.977413 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:38Z","lastTransitionTime":"2025-11-29T06:34:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:38 crc kubenswrapper[4943]: E1129 06:34:38.998552 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:38Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:38 crc kubenswrapper[4943]: E1129 06:34:38.999096 4943 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.001005 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.001113 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.001214 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.001297 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.001370 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.103846 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.103882 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.103892 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.103908 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.103920 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.206872 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.206949 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.206963 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.206982 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.206996 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.309786 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.309828 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.309855 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.309894 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.309903 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.327166 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.327259 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.327185 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:39 crc kubenswrapper[4943]: E1129 06:34:39.327305 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:39 crc kubenswrapper[4943]: E1129 06:34:39.327379 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.327396 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:39 crc kubenswrapper[4943]: E1129 06:34:39.327487 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:39 crc kubenswrapper[4943]: E1129 06:34:39.327614 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.412076 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.412116 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.412124 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.412139 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.412150 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.514715 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.514748 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.514756 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.514770 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.514780 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.616887 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.616945 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.616963 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.616988 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.617007 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.719283 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.719329 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.719343 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.719360 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.719370 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.822016 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.822057 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.822071 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.822089 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.822102 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.924546 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.924639 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.924657 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.924680 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:39 crc kubenswrapper[4943]: I1129 06:34:39.924701 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:39Z","lastTransitionTime":"2025-11-29T06:34:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.027219 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.027250 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.027258 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.027272 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.027283 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:40Z","lastTransitionTime":"2025-11-29T06:34:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.130511 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.130543 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.130550 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.130578 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.130587 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:40Z","lastTransitionTime":"2025-11-29T06:34:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.130654 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.145048 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.157276 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-l
ib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"nam
e\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default 
network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\
\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.170121 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.185656 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.202498 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 
06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.220604 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.233665 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.233713 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.233724 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.233744 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.233760 4943 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:40Z","lastTransitionTime":"2025-11-29T06:34:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.236636 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.251053 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.264710 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.279421 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.293080 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.311459 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.331649 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-p
od-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.336318 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.336374 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.336385 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.336403 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.336414 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:40Z","lastTransitionTime":"2025-11-29T06:34:40Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.349392 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\
\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.361859 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.376551 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.395218 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.413453 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:40Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.439802 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.439844 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.439853 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.439867 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.439876 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:40Z","lastTransitionTime":"2025-11-29T06:34:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.543645 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.543716 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.543727 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.543745 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:40 crc kubenswrapper[4943]: I1129 06:34:40.543756 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:40Z","lastTransitionTime":"2025-11-29T06:34:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
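The condition={...} payload in the setters.go entry above is the node's Ready condition exactly as the kubelet serializes it, and it repeats unchanged (timestamps aside) on every status update while the CNI config is missing. A short sketch, assuming k8s.io/api and k8s.io/apimachinery are on the module path, that rebuilds the same object and prints the identical JSON shape:

    // nodecondition.go - reconstruct the logged NodeCondition; field
    // values are copied from the log entry above.
    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	ts := metav1.NewTime(time.Date(2025, 11, 29, 6, 34, 40, 0, time.UTC))
    	cond := corev1.NodeCondition{
    		Type:               corev1.NodeReady,
    		Status:             corev1.ConditionFalse,
    		LastHeartbeatTime:  ts,
    		LastTransitionTime: ts,
    		Reason:             "KubeletNotReady",
    		Message: "container runtime network not ready: NetworkReady=false " +
    			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
    			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
    			"Has your network provider started?",
    	}
    	b, _ := json.Marshal(cond)
    	fmt.Println(string(b)) // same shape as condition={...} in the log
    }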
Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.326743 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.326785 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:41 crc kubenswrapper[4943]: E1129 06:34:41.326872 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.326916 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:41 crc kubenswrapper[4943]: E1129 06:34:41.326995 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.326743 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:41 crc kubenswrapper[4943]: E1129 06:34:41.327065 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:41 crc kubenswrapper[4943]: E1129 06:34:41.327116 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.367486 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.367540 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.367552 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.367589 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.367605 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:41Z","lastTransitionTime":"2025-11-29T06:34:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.469886 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.469941 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.469953 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.469967 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:41 crc kubenswrapper[4943]: I1129 06:34:41.469979 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:41Z","lastTransitionTime":"2025-11-29T06:34:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.160305 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.160337 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.160345 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.160359 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.160369 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.262361 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.262410 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.262418 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.262432 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.262441 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.326755 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:45 crc kubenswrapper[4943]: E1129 06:34:45.326918 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.327067 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:45 crc kubenswrapper[4943]: E1129 06:34:45.327169 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.327198 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.327317 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:45 crc kubenswrapper[4943]: E1129 06:34:45.327372 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:45 crc kubenswrapper[4943]: E1129 06:34:45.327440 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.342341 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.357147 4943 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.364828 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.364867 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.364877 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.364892 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.364902 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.369353 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\
\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.386767 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"
run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 
ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\"
:\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.397430 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.407143 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 
06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.420368 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.432607 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.443665 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.452221 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.462083 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.467455 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.467504 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.467513 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.467529 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.467539 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.473445 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.485673 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.504852 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.528044 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.541814 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.555955 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.566158 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:45Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.569989 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.570025 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.570035 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.570076 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.570089 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.673198 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.673248 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.673260 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.673279 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.673291 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.776000 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.776037 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.776048 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.776064 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.776075 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.879480 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.879523 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.879534 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.879551 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.879583 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.982869 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.982928 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.982940 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.982963 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:45 crc kubenswrapper[4943]: I1129 06:34:45.982975 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:45Z","lastTransitionTime":"2025-11-29T06:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.086973 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.087024 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.087035 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.087055 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.087067 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.190875 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.190923 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.190933 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.190949 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.190961 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.294068 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.294138 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.294152 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.294175 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.294193 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.398629 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.398716 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.398753 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.398777 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.398791 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.502514 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.502582 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.502593 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.502617 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.502629 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.605183 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.605227 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.605244 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.605264 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.605275 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.707917 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.707968 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.707979 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.707997 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.708010 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.810847 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.810904 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.810915 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.810936 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.810949 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.914500 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.914534 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.914544 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.914559 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:46 crc kubenswrapper[4943]: I1129 06:34:46.914585 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:46Z","lastTransitionTime":"2025-11-29T06:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.018398 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.018455 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.018471 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.018494 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.018512 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:47Z","lastTransitionTime":"2025-11-29T06:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.122240 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.122412 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.122436 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.122510 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.122530 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:47Z","lastTransitionTime":"2025-11-29T06:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.231384 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.232063 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.232249 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.232412 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.232523 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:47Z","lastTransitionTime":"2025-11-29T06:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.329684 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.330404 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.330371 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:34:47 crc kubenswrapper[4943]: E1129 06:34:47.330684 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.330812 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:34:47 crc kubenswrapper[4943]: E1129 06:34:47.331065 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:34:47 crc kubenswrapper[4943]: E1129 06:34:47.331253 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:34:47 crc kubenswrapper[4943]: E1129 06:34:47.330953 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.334531 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.334598 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.334611 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.334630 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.334642 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:47Z","lastTransitionTime":"2025-11-29T06:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.437702 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.437767 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.437782 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.437802 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:47 crc kubenswrapper[4943]: I1129 06:34:47.437816 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:47Z","lastTransitionTime":"2025-11-29T06:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.128498 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.128554 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.128584 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.128599 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.128607 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.140429 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:49Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.143777 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.143821 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.143832 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.143846 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.143858 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.156484 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:49Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.161073 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.161109 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.161118 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.161131 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.161140 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.171691 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:49Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.175220 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.175250 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.175261 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.175275 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.175284 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.186029 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:49Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.189719 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.189762 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.189773 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.189790 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.189803 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.200702 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:49Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.200999 4943 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.202547 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.202609 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.202619 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.202634 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.202646 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.304925 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.304975 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.304990 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.305010 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.305023 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.327388 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.327444 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.327452 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.327544 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.327766 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.327894 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.328058 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:49 crc kubenswrapper[4943]: E1129 06:34:49.328267 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.407752 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.407792 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.407802 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.407817 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.407838 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.509829 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.509859 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.509875 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.509891 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.509901 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.612304 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.612694 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.612839 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.612979 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.613114 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.715586 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.716937 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.716952 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.716965 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.716974 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.818918 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.818995 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.819008 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.819020 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.819030 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.921477 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.921522 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.921535 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.921552 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:49 crc kubenswrapper[4943]: I1129 06:34:49.921581 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:49Z","lastTransitionTime":"2025-11-29T06:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.024702 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.024747 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.024759 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.024776 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.024791 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.126599 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.126638 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.126648 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.126663 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.126672 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.229301 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.229348 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.229363 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.229385 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.229402 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.332867 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.332922 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.332935 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.332951 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.332963 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.512820 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.512865 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.512879 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.512902 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.512920 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.615086 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.615118 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.615128 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.615140 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.615149 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.716976 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.717011 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.717020 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.717035 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.717048 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.819291 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.819331 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.819340 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.819353 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.819362 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.925340 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.925403 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.925421 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.925445 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:50 crc kubenswrapper[4943]: I1129 06:34:50.925462 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:50Z","lastTransitionTime":"2025-11-29T06:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.028542 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.028596 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.028606 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.028617 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.028627 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.131002 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.131049 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.131063 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.131081 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.131092 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.233876 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.233937 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.233955 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.233977 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.233995 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.327249 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.327283 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.327359 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:34:51 crc kubenswrapper[4943]: E1129 06:34:51.327407 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.327430 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:34:51 crc kubenswrapper[4943]: E1129 06:34:51.327496 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:34:51 crc kubenswrapper[4943]: E1129 06:34:51.327596 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:34:51 crc kubenswrapper[4943]: E1129 06:34:51.327669 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.336082 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.336106 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.336116 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.336129 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.336139 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.438542 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.438589 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.438599 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.438612 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.438621 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.540908 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.540949 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.540960 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.540976 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.540987 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.628242 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:34:51 crc kubenswrapper[4943]: E1129 06:34:51.628376 4943 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 29 06:34:51 crc kubenswrapper[4943]: E1129 06:34:51.628457 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs podName:b7b0785d-0c62-4fef-83aa-a9d32e9d388b nodeName:}" failed. No retries permitted until 2025-11-29 06:35:23.628439344 +0000 UTC m=+98.558528097 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs") pod "network-metrics-daemon-4wgtt" (UID: "b7b0785d-0c62-4fef-83aa-a9d32e9d388b") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.643286 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.643324 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.643338 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.643354 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.643365 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.745793 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.745822 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.745831 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.745843 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.745852 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.847953 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.847990 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.848000 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.848016 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.848028 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.950066 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.950117 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.950126 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.950140 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:51 crc kubenswrapper[4943]: I1129 06:34:51.950149 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:51Z","lastTransitionTime":"2025-11-29T06:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.052127 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.052169 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.052177 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.052193 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.052204 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.154767 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.154818 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.154833 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.154851 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.154863 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.257312 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.257362 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.257372 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.257386 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.257395 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.327972 4943 scope.go:117] "RemoveContainer" containerID="4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2"
Nov 29 06:34:52 crc kubenswrapper[4943]: E1129 06:34:52.328130 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.359523 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.359578 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.359591 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.359604 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.359615 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.462194 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.462234 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.462242 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.462256 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.462266 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.564372 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.564433 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.564441 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.564454 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.564463 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.666291 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.666330 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.666343 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.666359 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.666371 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.768512 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.768549 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.768576 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.768592 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.768605 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.871447 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.871485 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.871494 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.871507 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.871515 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.973179 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.973214 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.973223 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.973236 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:52 crc kubenswrapper[4943]: I1129 06:34:52.973246 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:52Z","lastTransitionTime":"2025-11-29T06:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.075777 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.075833 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.075844 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.075859 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.075868 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.177828 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.177863 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.177871 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.177888 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.177897 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.284661 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.284703 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.284721 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.284738 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.284751 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.327391 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.327430 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:34:53 crc kubenswrapper[4943]: E1129 06:34:53.327514 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.327541 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.327391 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:34:53 crc kubenswrapper[4943]: E1129 06:34:53.327722 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:34:53 crc kubenswrapper[4943]: E1129 06:34:53.327775 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:34:53 crc kubenswrapper[4943]: E1129 06:34:53.327834 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.388441 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.388481 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.388496 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.388518 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.388537 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.491475 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.491535 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.491553 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.491604 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.491623 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.594178 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.594233 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.594245 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.594261 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.594273 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.696342 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.696390 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.696402 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.696418 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.696430 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.716094 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/0.log"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.716142 4943 generic.go:334] "Generic (PLEG): container finished" podID="ca406df5-4c80-44b5-9092-4ff17b0b0c72" containerID="9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69" exitCode=1
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.716171 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerDied","Data":"9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69"}
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.716532 4943 scope.go:117] "RemoveContainer" containerID="9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.728070 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.739993 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.752099 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:52Z\\\",\\\"message\\\":\\\"2025-11-29T06:34:07+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5\\\\n2025-11-29T06:34:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5 to /host/opt/cni/bin/\\\\n2025-11-29T06:34:07Z [verbose] multus-daemon started\\\\n2025-11-29T06:34:07Z [verbose] Readiness Indicator file check\\\\n2025-11-29T06:34:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z"
Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.772899 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.786062 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.797070 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.798887 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.798926 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.798937 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.798952 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.798961 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.807210 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.819426 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.834381 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.847275 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.864304 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.878809 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.892059 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.901675 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.901726 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.901740 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.901759 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.901774 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:53Z","lastTransitionTime":"2025-11-29T06:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.903307 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.920024 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 
2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.941129 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/
etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025
-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.958171 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:53 crc kubenswrapper[4943]: I1129 06:34:53.972088 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:53Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.003782 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.003842 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.003855 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.003876 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.003887 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.105701 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.105744 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.105756 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.105773 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.105798 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.207669 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.207701 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.207709 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.207722 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.207737 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.310331 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.310365 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.310374 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.310387 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.310408 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.413782 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.413835 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.413846 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.413862 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.413875 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.515656 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.515691 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.515702 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.515716 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.515729 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.618291 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.618346 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.618355 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.618369 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.618378 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.719801 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.719851 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.719862 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.719876 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.719886 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.721089 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/0.log" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.721144 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerStarted","Data":"02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.737783 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d
7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.747163 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.757703 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 
06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.769911 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.783791 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.795113 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.804335 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.814878 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.822142 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.822171 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.822180 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.822212 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.822224 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.825401 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.835459 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.847735 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.864749 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.876308 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.885947 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.894585 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.906678 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.917264 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.925953 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.925990 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.926001 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.926017 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.926030 4943 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:54Z","lastTransitionTime":"2025-11-29T06:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:54 crc kubenswrapper[4943]: I1129 06:34:54.930133 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:52Z\\\",\\\"message\\\":\\\"2025-11-29T06:34:07+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5\\\\n2025-11-29T06:34:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5 to /host/opt/cni/bin/\\\\n2025-11-29T06:34:07Z [verbose] multus-daemon started\\\\n2025-11-29T06:34:07Z [verbose] Readiness Indicator file check\\\\n2025-11-29T06:34:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:54Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.027589 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.027629 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.027639 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.027655 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.027667 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.130431 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.130487 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.130499 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.130518 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.130531 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.233229 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.233327 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.233352 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.233381 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.233406 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.326723 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.326768 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.326797 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:55 crc kubenswrapper[4943]: E1129 06:34:55.326906 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.326959 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:55 crc kubenswrapper[4943]: E1129 06:34:55.327068 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:55 crc kubenswrapper[4943]: E1129 06:34:55.327137 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:55 crc kubenswrapper[4943]: E1129 06:34:55.327196 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.335666 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.335751 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.335775 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.335805 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.335827 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.343668 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.355510 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.366596 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.386609 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"19
2.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"202
5-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0
c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.408682 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b
4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.423654 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.436405 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.438112 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.438172 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.438186 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.438202 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 
06:34:55.438213 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.447212 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.462852 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.474615 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.484705 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:52Z\\\",\\\"message\\\":\\\"2025-11-29T06:34:07+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5\\\\n2025-11-29T06:34:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5 to /host/opt/cni/bin/\\\\n2025-11-29T06:34:07Z [verbose] multus-daemon started\\\\n2025-11-29T06:34:07Z [verbose] Readiness Indicator file check\\\\n2025-11-29T06:34:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.503883 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.513582 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.524166 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 
06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.537328 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.548248 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.555827 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.555850 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.555858 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.555871 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.555881 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.558467 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.566448 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:55Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.658167 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.658193 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.658200 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.658213 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.658221 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.760915 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.760950 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.760959 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.760973 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.760982 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.863654 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.863865 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.863955 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.864026 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.864086 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.966381 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.966679 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.966765 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.966837 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:55 crc kubenswrapper[4943]: I1129 06:34:55.966897 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:55Z","lastTransitionTime":"2025-11-29T06:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.068858 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.068918 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.068930 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.068968 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.068978 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.170922 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.170957 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.170967 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.170981 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.170993 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.272940 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.272972 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.272981 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.272995 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.273004 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.375121 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.375381 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.375460 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.375548 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.375662 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.478014 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.478048 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.478059 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.478074 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.478085 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.579906 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.579977 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.579988 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.580005 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.580016 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.682688 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.682728 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.682739 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.682752 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.682763 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.784968 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.784999 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.785009 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.785023 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.785032 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.887244 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.887272 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.887281 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.887293 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.887302 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.988829 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.988872 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.988882 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.988895 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:56 crc kubenswrapper[4943]: I1129 06:34:56.988906 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:56Z","lastTransitionTime":"2025-11-29T06:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.091288 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.091323 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.091331 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.091344 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.091357 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.193647 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.193709 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.193721 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.193775 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.193808 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.296246 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.296292 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.296301 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.296316 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.296324 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.327172 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.327215 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.327146 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.327177 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:57 crc kubenswrapper[4943]: E1129 06:34:57.327313 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:57 crc kubenswrapper[4943]: E1129 06:34:57.327375 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:57 crc kubenswrapper[4943]: E1129 06:34:57.327462 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:57 crc kubenswrapper[4943]: E1129 06:34:57.327528 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.398875 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.398905 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.398914 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.398927 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.398936 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.501810 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.501856 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.501867 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.501883 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.501895 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.603992 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.604039 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.604048 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.604060 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.604068 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.706433 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.706507 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.706524 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.706541 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.706552 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.809250 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.809300 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.809309 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.809324 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.809335 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.911537 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.911606 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.911618 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.911636 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:57 crc kubenswrapper[4943]: I1129 06:34:57.911647 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:57Z","lastTransitionTime":"2025-11-29T06:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.014301 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.014348 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.014358 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.014378 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.014390 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.117230 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.117283 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.117293 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.117308 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.117318 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.220042 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.220095 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.220106 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.220168 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.220184 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.322713 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.322759 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.322769 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.322784 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.322793 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.425044 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.425085 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.425094 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.425109 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.425118 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.528101 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.528146 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.528156 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.528173 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.528184 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.630703 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.630757 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.630771 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.630790 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.630803 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.733749 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.733787 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.733796 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.733815 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.733826 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.835630 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.835695 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.835707 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.835722 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.835733 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.937772 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.937810 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.937818 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.937831 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:58 crc kubenswrapper[4943]: I1129 06:34:58.937840 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:58Z","lastTransitionTime":"2025-11-29T06:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.039911 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.039964 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.039982 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.040002 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.040012 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.142525 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.142576 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.142586 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.142599 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.142608 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.244257 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.244301 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.244310 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.244323 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.244333 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.326897 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.326993 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.327025 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.327065 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.327093 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.327188 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.327227 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.327341 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.346450 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.346488 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.346500 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.346518 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.346534 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.449274 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.449310 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.449319 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.449332 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.449342 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.516417 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.516643 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.516717 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.516799 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.516862 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.531465 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:59Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.535825 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.535922 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.535985 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.536047 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.536113 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.548361 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:59Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.551281 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.551377 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.551436 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.551504 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.551587 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.562217 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:59Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.565215 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.565290 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.565308 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.565333 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.565350 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.578442 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:59Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.582820 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.582914 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.582978 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.583050 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.583121 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.594350 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:34:59Z is after 2025-08-24T17:21:41Z" Nov 29 06:34:59 crc kubenswrapper[4943]: E1129 06:34:59.594458 4943 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.595641 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.595658 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.595666 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.595684 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.595700 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.697624 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.697679 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.697692 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.697712 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.697728 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.800644 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.800731 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.800748 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.800768 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.800782 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.903442 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.903543 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.903647 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.903692 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:34:59 crc kubenswrapper[4943]: I1129 06:34:59.903719 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:34:59Z","lastTransitionTime":"2025-11-29T06:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.006027 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.006114 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.006144 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.006172 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.006192 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:00Z","lastTransitionTime":"2025-11-29T06:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.107984 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.108037 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.108053 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.108073 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.108091 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:00Z","lastTransitionTime":"2025-11-29T06:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.210505 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.210585 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.210599 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.210612 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:00 crc kubenswrapper[4943]: I1129 06:35:00.210620 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:00Z","lastTransitionTime":"2025-11-29T06:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
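Every one of these NotReady heartbeats traces back to the same check: the runtime found nothing it recognizes as a network config in /etc/kubernetes/cni/net.d/. A minimal sketch of that discovery step, assuming the usual libcni convention that any *.conf, *.conflist, or *.json file counts as a candidate (the real runtime also parses and validates the file contents, which this skips):

    // Sketch of the check behind "no CNI configuration file in
    // /etc/kubernetes/cni/net.d/"; path taken from the log message,
    // extension convention assumed from libcni.
    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	confDir := "/etc/kubernetes/cni/net.d"
    	entries, err := os.ReadDir(confDir)
    	if err != nil {
    		fmt.Println("cannot read conf dir:", err)
    		return
    	}
    	found := false
    	for _, e := range entries {
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			fmt.Println("candidate CNI config:", e.Name())
    			found = true
    		}
    	}
    	if !found {
    		fmt.Println("no CNI configuration file found; node stays NotReady")
    	}
    }

Until the network provider's pods drop a config file into that directory, every sync loop repeats the same condition, which is why the cycle below recurs roughly every 100ms.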
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.136526 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.136557 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.136585 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.136601 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.136612 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:01Z","lastTransitionTime":"2025-11-29T06:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.327160 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.327236 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.327236 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:35:01 crc kubenswrapper[4943]: E1129 06:35:01.327283 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:35:01 crc kubenswrapper[4943]: E1129 06:35:01.327366 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:35:01 crc kubenswrapper[4943]: I1129 06:35:01.327412 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:35:01 crc kubenswrapper[4943]: E1129 06:35:01.327498 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:35:01 crc kubenswrapper[4943]: E1129 06:35:01.327542 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:35:02 crc kubenswrapper[4943]: I1129 06:35:02.059332 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:02 crc kubenswrapper[4943]: I1129 06:35:02.059368 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:02 crc kubenswrapper[4943]: I1129 06:35:02.059379 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:02 crc kubenswrapper[4943]: I1129 06:35:02.059392 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:02 crc kubenswrapper[4943]: I1129 06:35:02.059401 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:02Z","lastTransitionTime":"2025-11-29T06:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.083049 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.083084 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.083094 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.083108 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.083118 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:03Z","lastTransitionTime":"2025-11-29T06:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.326972 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.327108 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:35:03 crc kubenswrapper[4943]: E1129 06:35:03.327160 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.326984 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:35:03 crc kubenswrapper[4943]: I1129 06:35:03.327217 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:35:03 crc kubenswrapper[4943]: E1129 06:35:03.327412 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:35:03 crc kubenswrapper[4943]: E1129 06:35:03.327513 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:35:03 crc kubenswrapper[4943]: E1129 06:35:03.327605 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:35:04 crc kubenswrapper[4943]: I1129 06:35:04.007628 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:04 crc kubenswrapper[4943]: I1129 06:35:04.007671 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:04 crc kubenswrapper[4943]: I1129 06:35:04.007680 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:04 crc kubenswrapper[4943]: I1129 06:35:04.007694 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:04 crc kubenswrapper[4943]: I1129 06:35:04.007706 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:04Z","lastTransitionTime":"2025-11-29T06:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:04 crc kubenswrapper[4943]: I1129 06:35:04.337447 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.034987 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.035031 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.035040 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.035056 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.035066 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:05Z","lastTransitionTime":"2025-11-29T06:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.327078 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:35:05 crc kubenswrapper[4943]: E1129 06:35:05.327303 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.327690 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.327714 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:35:05 crc kubenswrapper[4943]: E1129 06:35:05.327817 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.327697 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:35:05 crc kubenswrapper[4943]: E1129 06:35:05.328141 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:35:05 crc kubenswrapper[4943]: E1129 06:35:05.328364 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.328402 4943 scope.go:117] "RemoveContainer" containerID="4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2"
Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.342123 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z"
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.342658 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.342688 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.342699 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.342714 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.342725 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:05Z","lastTransitionTime":"2025-11-29T06:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.359970 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:52Z\\\",\\\"message\\\":\\\"2025-11-29T06:34:07+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5\\\\n2025-11-29T06:34:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5 to /host/opt/cni/bin/\\\\n2025-11-29T06:34:07Z [verbose] multus-daemon started\\\\n2025-11-29T06:34:07Z [verbose] Readiness Indicator file check\\\\n2025-11-29T06:34:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.378458 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.393404 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.413288 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.431599 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.445256 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.445531 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.445744 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.445844 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.445941 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:05Z","lastTransitionTime":"2025-11-29T06:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.450236 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.462899 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.479947 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.499481 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.512804 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce87105e-8a8c-4813-8bed-4e393d3c1290\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dd0bda7516c0dffbe96e5bb9011ff88cd6d7069047000e3db8a177262385e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.528675 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.542295 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.550797 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.550841 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.550853 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.550870 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 
06:35:05.550881 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:05Z","lastTransitionTime":"2025-11-29T06:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.556866 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.570311 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.582640 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.596508 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.614550 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.625970 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:05Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.652934 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.652974 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.652983 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.652998 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.653007 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:05Z","lastTransitionTime":"2025-11-29T06:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.754705 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.755026 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.755039 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.755057 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.755069 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:05Z","lastTransitionTime":"2025-11-29T06:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.858030 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.858080 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.858089 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.858105 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.858114 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:05Z","lastTransitionTime":"2025-11-29T06:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.960156 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.960198 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.960207 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.960225 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:05 crc kubenswrapper[4943]: I1129 06:35:05.960234 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:05Z","lastTransitionTime":"2025-11-29T06:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.063100 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.063160 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.063176 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.063200 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.063219 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.167036 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.167077 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.167088 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.167113 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.167124 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.269535 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.269676 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.269701 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.269731 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.269755 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.371870 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.371936 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.371954 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.371982 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.371999 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.474917 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.474977 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.474997 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.475026 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.475041 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.578117 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.578161 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.578171 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.578187 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.578197 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.681511 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.681593 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.681608 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.681626 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.681640 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.756693 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/2.log" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.759603 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.760277 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.780742 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.784250 4943 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.784287 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.784300 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.784318 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.784331 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.794260 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948291
9d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.811780 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-clu
ster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.833223 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.856753 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.866859 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.877660 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.887166 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.887227 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.887240 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.887259 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.887272 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.890428 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.902494 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.917076 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.940658 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d33
94d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.954999 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce87105e-8a8c-4813-8bed-4e393d3c1290\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dd0bda7516c0dffbe96e5bb9011ff88cd6d7069047000e3db8a177262385e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4202
dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.971451 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791
fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.985431 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.989533 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.989592 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.989603 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.989619 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:06 crc kubenswrapper[4943]: I1129 06:35:06.989628 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:06Z","lastTransitionTime":"2025-11-29T06:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.001834 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:06Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.015594 4943 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.031663 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:52Z\\\",\\\"message\\\":\\\"2025-11-29T06:34:07+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5\\\\n2025-11-29T06:34:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5 to /host/opt/cni/bin/\\\\n2025-11-29T06:34:07Z [verbose] multus-daemon started\\\\n2025-11-29T06:34:07Z [verbose] Readiness Indicator file check\\\\n2025-11-29T06:34:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.052973 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:35:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.064694 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.092474 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.092528 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.092542 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.092557 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.092595 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.194714 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.194948 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.195019 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.195080 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.195136 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.297539 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.297595 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.297607 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.297621 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.297631 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.326773 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.326793 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.326811 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:07 crc kubenswrapper[4943]: E1129 06:35:07.326928 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:07 crc kubenswrapper[4943]: E1129 06:35:07.327021 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.327073 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:07 crc kubenswrapper[4943]: E1129 06:35:07.327130 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:07 crc kubenswrapper[4943]: E1129 06:35:07.327192 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.400227 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.400275 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.400289 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.400308 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.400324 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.502783 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.502825 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.502839 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.502854 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.502864 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.605269 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.605307 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.605318 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.605332 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.605341 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.707669 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.707707 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.707719 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.707734 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.707743 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.771204 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/3.log" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.771856 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/2.log" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.775144 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" exitCode=1 Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.775186 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.775233 4943 scope.go:117] "RemoveContainer" containerID="4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.776602 4943 scope.go:117] "RemoveContainer" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" Nov 29 06:35:07 crc kubenswrapper[4943]: E1129 06:35:07.777290 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.793898 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.811304 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.811337 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.811507 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.811528 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.811400 4943 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.811537 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.824688 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.835339 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.846307 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 
06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.865597 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.876225 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce87105e-8a8c-4813-8bed-4e393d3c1290\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dd0bda7516c0dffbe96e5bb9011ff88cd6d7069047000e3db8a177262385e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.887650 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.900805 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.912730 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.914276 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.914320 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.914330 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.914344 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.914354 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:07Z","lastTransitionTime":"2025-11-29T06:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.926128 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.939396 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.958473 4943 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.970050 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:07 crc kubenswrapper[4943]: I1129 06:35:07.983378 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.000992 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.016419 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:52Z\\\",\\\"message\\\":\\\"2025-11-29T06:34:07+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5\\\\n2025-11-29T06:34:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5 to /host/opt/cni/bin/\\\\n2025-11-29T06:34:07Z [verbose] multus-daemon started\\\\n2025-11-29T06:34:07Z [verbose] Readiness Indicator file check\\\\n2025-11-29T06:34:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.019152 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.019203 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.019215 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.019235 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.019246 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.038271 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f4f72e9056488e51d7ed03e2a1c947038b5015d7d31907353ac16bf82d607b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:34Z\\\",\\\"message\\\":\\\"hine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377359 6588 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 in node crc\\\\nI1129 06:34:34.377364 6588 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-f4gf7 after 0 failed attempt(s)\\\\nI1129 06:34:34.377368 6588 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-f4gf7\\\\nI1129 06:34:34.377376 6588 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377381 6588 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-4wgtt\\\\nI1129 06:34:34.377386 6588 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-4wgtt in node crc\\\\nI1129 06:34:34.377394 6588 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 06:34:34.377450 6588 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-4wgtt] creating logical port openshift-multus_network-metrics-daemon-4wgtt for pod on switch crc\\\\nF1129 06:34:34.377472 6588 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:35:07Z\\\",\\\"message\\\":\\\"as stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z]\\\\nI1129 06:35:07.193012 7000 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nI1129 06:35:07.193039 7000 services_controller.go:451] Built service openshift-network-console/networking-console-plugin cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-network-console/networking-console-plugin_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-network-console/networking-console-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), 
Swi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:35:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.052350 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.123123 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.123203 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.123239 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.123264 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.123280 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.226024 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.226062 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.226072 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.226086 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.226097 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.328441 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.328504 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.328516 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.328539 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.328553 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.430934 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.430979 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.430989 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.431005 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.431016 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.533358 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.533406 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.533416 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.533431 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.533441 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.636497 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.636615 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.636642 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.636675 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.636698 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.739816 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.739860 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.739871 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.739886 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.739897 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.779980 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/3.log" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.783939 4943 scope.go:117] "RemoveContainer" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" Nov 29 06:35:08 crc kubenswrapper[4943]: E1129 06:35:08.784076 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.796686 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.808499 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"e
gress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acces
s-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":
\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.826076 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"re
source-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\
\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.835753 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce87105e-8a8c-4813-8bed-4e393d3c1290\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dd0bda7516c0dffbe96e5bb9011ff88cd6d7069047000e3db8a177262385e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.841886 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.841916 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.841925 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.841948 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.841960 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.847632 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.858557 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.868856 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 
06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.878702 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.886345 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.896293 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.906803 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.921043 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:52Z\\\",\\\"message\\\":\\\"2025-11-29T06:34:07+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5\\\\n2025-11-29T06:34:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5 to /host/opt/cni/bin/\\\\n2025-11-29T06:34:07Z [verbose] multus-daemon started\\\\n2025-11-29T06:34:07Z [verbose] Readiness Indicator file check\\\\n2025-11-29T06:34:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.944432 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.944474 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.944483 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.944497 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.944508 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:08Z","lastTransitionTime":"2025-11-29T06:35:08Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.944938 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:35:07Z\\\",\\\"message\\\":\\\"as stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z]\\\\nI1129 06:35:07.193012 7000 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nI1129 06:35:07.193039 7000 services_controller.go:451] Built service openshift-network-console/networking-console-plugin cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-network-console/networking-console-plugin_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-network-console/networking-console-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), 
Swi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:35:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.954969 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.965990 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.977428 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.987968 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:08 crc kubenswrapper[4943]: I1129 06:35:08.997885 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:35:08Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.008582 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.046528 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.046606 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.046619 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.046636 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.046647 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.149074 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.149121 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.149132 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.149148 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.149159 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.198061 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.198181 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198222 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:13.198187446 +0000 UTC m=+148.128276199 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198286 4943 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.198343 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198362 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:13.1983459 +0000 UTC m=+148.128434653 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.198389 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198521 4943 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198534 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198553 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198591 4943 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198604 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:13.198591707 +0000 UTC m=+148.128680500 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.198631 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:13.198622338 +0000 UTC m=+148.128711101 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.251323 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.251354 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.251363 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.251376 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.251386 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.299379 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.299548 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.299621 4943 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.299632 4943 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.299686 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:13.299669945 +0000 UTC m=+148.229758698 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.326788 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.326875 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.326922 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.327019 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.327069 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.327127 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.327177 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.327230 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.353363 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.353395 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.353406 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.353420 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.353429 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.456123 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.456163 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.456174 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.456191 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.456202 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.559506 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.559620 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.559641 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.559670 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.559690 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.661934 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.661984 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.662002 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.662019 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.662031 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.764880 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.764912 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.764923 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.764940 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.764951 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.867120 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.867161 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.867171 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.867187 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.867197 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.868293 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.868328 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.868359 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.868374 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.868384 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.878831 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.881871 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.881902 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.881912 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.881928 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.881939 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.893662 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.896504 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.896552 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.896596 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.896617 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.896632 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.908550 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.911516 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.911548 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.911557 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.911589 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.911600 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.921726 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.924536 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.924579 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.924591 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.924604 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.924615 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.936906 4943 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T06:35:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"56e5e96f-c824-4364-b35c-8cd0f292a058\\\",\\\"systemUUID\\\":\\\"7b86982e-0266-40f9-aa86-203795c8126b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:09Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:09 crc kubenswrapper[4943]: E1129 06:35:09.937013 4943 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.969327 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.969362 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.969372 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.969388 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:09 crc kubenswrapper[4943]: I1129 06:35:09.969399 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:09Z","lastTransitionTime":"2025-11-29T06:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.071593 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.071673 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.071696 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.071728 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.071749 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.173947 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.173987 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.173999 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.174015 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.174024 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.277665 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.277708 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.277732 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.277775 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.277804 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.380546 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.380603 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.380613 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.380658 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.380668 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.484361 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.484473 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.484487 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.484525 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.484540 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.586509 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.586589 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.586605 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.586629 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.586646 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.690105 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.690170 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.690188 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.690212 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.690231 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.793040 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.793082 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.793096 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.793112 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.793127 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.896726 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.896779 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.896792 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.896810 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.896824 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.999407 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.999454 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.999465 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.999483 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:10 crc kubenswrapper[4943]: I1129 06:35:10.999494 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:10Z","lastTransitionTime":"2025-11-29T06:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.101303 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.101338 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.101346 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.101361 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.101370 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.204468 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.204527 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.204544 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.204590 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.204606 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.307452 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.307530 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.307541 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.307557 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.307584 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.327080 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.327136 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.327155 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.327108 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:11 crc kubenswrapper[4943]: E1129 06:35:11.327251 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:11 crc kubenswrapper[4943]: E1129 06:35:11.327320 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:11 crc kubenswrapper[4943]: E1129 06:35:11.327445 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:11 crc kubenswrapper[4943]: E1129 06:35:11.327552 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.409805 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.409859 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.409867 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.409887 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.409897 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.511970 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.512034 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.512052 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.512076 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.512092 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.614661 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.614693 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.614702 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.614715 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.614725 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.716946 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.716984 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.716994 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.717008 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.717054 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.819999 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.820045 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.820060 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.820138 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.820153 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.923805 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.923873 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.923888 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.923912 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:11 crc kubenswrapper[4943]: I1129 06:35:11.923933 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:11Z","lastTransitionTime":"2025-11-29T06:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.028359 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.028406 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.028415 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.028430 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.028440 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.130658 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.130700 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.130711 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.130727 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.130740 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.233929 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.233984 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.233999 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.234018 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.234030 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.336169 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.336219 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.336231 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.336248 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.336259 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.438715 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.438990 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.439002 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.439080 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.439091 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.540670 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.540701 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.540709 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.540722 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.540730 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.642919 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.642956 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.642966 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.642979 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.642989 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.745419 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.745474 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.745486 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.745503 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.745514 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.848931 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.848973 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.848996 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.849019 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.849028 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.952086 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.952130 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.952144 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.952158 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:12 crc kubenswrapper[4943]: I1129 06:35:12.952169 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:12Z","lastTransitionTime":"2025-11-29T06:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.054743 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.054786 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.054795 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.054834 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.054844 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.157422 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.157476 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.157485 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.157501 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.157513 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.260431 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.260468 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.260497 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.260512 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.260520 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.326701 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.326813 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.326879 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.326940 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:13 crc kubenswrapper[4943]: E1129 06:35:13.326955 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:13 crc kubenswrapper[4943]: E1129 06:35:13.326931 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:13 crc kubenswrapper[4943]: E1129 06:35:13.327066 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:13 crc kubenswrapper[4943]: E1129 06:35:13.327152 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.363315 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.363352 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.363363 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.363377 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.363388 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.465429 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.465468 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.465479 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.465493 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.465502 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.568048 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.568089 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.568099 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.568114 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.568126 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.670986 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.671023 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.671031 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.671046 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.671055 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.774198 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.774235 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.774245 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.774260 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.774271 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.877449 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.877507 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.877524 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.877548 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.877595 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.980883 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.980971 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.980996 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.981019 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:13 crc kubenswrapper[4943]: I1129 06:35:13.981037 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:13Z","lastTransitionTime":"2025-11-29T06:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.083306 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.083355 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.083364 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.083379 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.083388 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.186395 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.186438 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.186451 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.186467 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.186478 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.289430 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.289487 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.289504 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.289522 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.289534 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.391734 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.391826 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.391873 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.391895 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.391912 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.494530 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.494663 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.494688 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.494715 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.494734 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.596546 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.596626 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.596635 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.596649 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.596679 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.699497 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.699530 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.699538 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.699552 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.699579 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.802347 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.802385 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.802392 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.802406 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.802414 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.905726 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.905794 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.905808 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.905834 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:14 crc kubenswrapper[4943]: I1129 06:35:14.905851 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:14Z","lastTransitionTime":"2025-11-29T06:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.008403 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.008452 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.008461 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.008477 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.008488 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.111431 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.111479 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.111489 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.111504 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.111514 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.214205 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.214250 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.214260 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.214276 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.214312 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.316410 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.316524 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.316536 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.316548 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.316557 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.326982 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.327033 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.327115 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:15 crc kubenswrapper[4943]: E1129 06:35:15.327116 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:15 crc kubenswrapper[4943]: E1129 06:35:15.327227 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.327277 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:15 crc kubenswrapper[4943]: E1129 06:35:15.327348 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:15 crc kubenswrapper[4943]: E1129 06:35:15.327410 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.343333 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfcab1db-586e-49c2-99e1-7886b7d75e47\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T06:34:04Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 06:33:58.607095 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 06:33:58.607798 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870619513/tls.crt::/tmp/serving-cert-870619513/tls.key\\\\\\\"\\\\nI1129 06:34:04.667643 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 06:34:04.671635 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 06:34:04.671658 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 06:34:04.671678 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 06:34:04.671682 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 06:34:04.677948 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 06:34:04.677980 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677985 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 06:34:04.677989 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 06:34:04.677993 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 06:34:04.677996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 06:34:04.678000 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 06:34:04.678010 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 06:34:04.680301 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.356970 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fc02c4102dd3bd50b81fcd220286fdce4dd3a6c218663577d09078b62a6d89b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.369421 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.379755 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-drzxp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43c2941f-a05b-4905-a72e-a5978229642e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d54bba5ca6e2165b0b17816affe6498cbc747ef2656d9cd15174d02d1bdcb7f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6grpl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-drzxp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.391459 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeaa4eb3-534c-4088-bd56-6beb70d968e7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6ddecc981692f6b649505b25e3515eef2e65b70fc56deefdb59188382e0df78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d9fa381e82ed5449939f523e9ff7d705ce3977a2afc4130be21785dd095fdfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc4xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-svhjp\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.406294 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae851705-b905-4caa-932a-345918c2d3f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d3b3d8d03dd4288e497214d8375a5c6ffb7b00f6ae863672d124fe321c866fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab523540e13820981578e208c677a124cdc87fc7ddc852e5795501e146665793\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/
run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6aa4be7346e410304fa1e2d9bafe5e4373ca5b05f46d4bd2c8ba8dba85bec953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://618c978f89eb611b3d0af29e9833cd1d31e400a91cc2c0e7cb53f062d67f7d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ca897dec12c34ee8514907eb076031fbce7eb19af44a054919ae126a3cbd5a\\\"
,\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://835d4f6907a7b2db3fc54da21a589f0e4e884848717b386b13f0b4022301048a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd20ae1e68cba0106dd27f5473f5673d36649bf7e403bd1f935a9fafdc13f738\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcshj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ptcqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z"
Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.420217 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.420276 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.420289 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.420309 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.420339 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.436146 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b136a990-e70e-469b-a8d0-7227e95581af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4519d66c692b5d297e6dcc495c9bd9c0fa0fb5389ee033f26128a281988e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://984335a92bde2cb1a9dab970731bd59f05b9f4d66d3ebd793373dee015788d1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49
117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25665e7fa7ecbebdfa077d3b3434571f78c1ee0963b29403e05a84c8c1736d6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7c9fd0e9ea13a4939e832c1e74e3425e4b1d3394d3a2a217d747e660ccaae5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d6209870e6b1defbeefe5a47cadec897d08cbcf574ceded463e5193e6a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://adc23e3b6ed758c8e6cdabe14750ce89a11fac36f4fbd3e60cca80cf7411d4e1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fef7ffc5d5243cd3e7461afc10eef32ce204636d97778d72b19d4aab00e4ce16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c66c2202d901f75d19b2efa7b0efc439d9ab22a757e86cfc20a4ff71b54746a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.449744 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce87105e-8a8c-4813-8bed-4e393d3c1290\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dd0bda7516c0dffbe96e5bb9011ff88cd6d7069047000e3db8a177262385e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4202dd7fd09d989d403c967f1832dd16a9a5de03b0c757864d66722d2b654842\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.464490 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37c6c45b-d78a-4816-9ac8-f27a6d1ee62d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2e083fcc229954cc8b5de294bbe157981ba621e8ede6b751c5e0ef770e4f7d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://45a0d66fb5c5517645cf0438e32c1635953d9068d172708f241b2bab88fc53d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9df81849dc16df68f34ab5aa06cd129d03e03515586805437879c105320590e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.478446 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"beba65b6-6e02-41ec-b075-358c6655f7fc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:33:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd4f391cfbd55a97e5ffa200adf36225a20d6c50e74794858a4fd9d3518c3313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2373f7c5818067ea98c7206ba2894e78db45101f72044d23f969992b6f1d87c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://18d40bfb3821d1d3f6dd057516a36e0d3c0a3dbcefeed02e7e052c76fc146c90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:33:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f609946ad4e3d4b5680dda97ce143d0ccdcfdcf2ce2e46943e94a73e4e117d1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:33:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:33:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:33:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.494610 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9726deef0b8b8e7cb4f246e9eb4753a4cce5b7363001b93ea583ec729b80ae72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.508740 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.519161 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9452a4f7-8768-4190-b544-50f80bc5ebf6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fcff3a323b3126f8292b9fea5344843cef7f68432266cb1a6687fdb2790ac31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6gtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f4gf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.522878 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.522922 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.522932 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.522947 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.522957 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.530806 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-fm4js" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3b1af38-c6d6-4943-9cb4-a0482aee684d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0cfd17892bdff2b1b4bfce4fc06a040b089c74587f4bc0bdf553ceab27acf761\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbkpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-fm4js\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.545987 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cead964e9db985819ba744f1a7c0db1df8037d6b2373ed64f8a50dfffb1e0b56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c085885982b3cce81042db15906649ae28930b07fcd5b44afce9129c9aa8fad2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.561483 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.576499 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kh8qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca406df5-4c80-44b5-9092-4ff17b0b0c72\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:34:52Z\\\",\\\"message\\\":\\\"2025-11-29T06:34:07+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5\\\\n2025-11-29T06:34:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6dadb029-6f25-4f93-b889-2d263b2db2c5 to /host/opt/cni/bin/\\\\n2025-11-29T06:34:07Z [verbose] multus-daemon started\\\\n2025-11-29T06:34:07Z [verbose] Readiness Indicator file check\\\\n2025-11-29T06:34:52Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmqqg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kh8qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.597818 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ac9747-c331-4c4f-af69-5153d05f4097\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T06:35:07Z\\\",\\\"message\\\":\\\"as stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:07Z is after 2025-08-24T17:21:41Z]\\\\nI1129 06:35:07.193012 7000 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nI1129 06:35:07.193039 7000 services_controller.go:451] Built service openshift-network-console/networking-console-plugin cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-network-console/networking-console-plugin_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-network-console/networking-console-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Swi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T06:35:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T06:34:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T06:34:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T06:34:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qfg9s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lrsts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.611130 4943 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T06:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-92kj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T06:34:19Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4wgtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T06:35:15Z is after 2025-08-24T17:21:41Z" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.626162 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.626217 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.626227 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.626282 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.626299 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.728873 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.728911 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.728919 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.728934 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.728943 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.830881 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.830922 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.830932 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.830946 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.830956 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.933195 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.933243 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.933253 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.933268 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:15 crc kubenswrapper[4943]: I1129 06:35:15.933279 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:15Z","lastTransitionTime":"2025-11-29T06:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.035703 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.035744 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.035752 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.035766 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.035775 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.138373 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.138451 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.138466 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.138484 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.138496 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.242121 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.242177 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.242191 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.242211 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.242233 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.344914 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.344941 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.344949 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.344961 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.344991 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.447119 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.447528 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.447616 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.447688 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.447760 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.549836 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.549875 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.549884 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.549897 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.549907 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.652207 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.652254 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.652267 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.652284 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.652294 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.754851 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.755103 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.755201 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.755273 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.755361 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.857336 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.857398 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.857415 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.857437 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.857454 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.960181 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.960217 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.960226 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.960240 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:16 crc kubenswrapper[4943]: I1129 06:35:16.960249 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:16Z","lastTransitionTime":"2025-11-29T06:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.062616 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.062663 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.062675 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.062693 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.062703 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.186152 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.186188 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.186199 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.186214 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.186226 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.288146 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.288186 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.288198 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.288216 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.288228 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.327148 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.327370 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.327394 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:35:17 crc kubenswrapper[4943]: E1129 06:35:17.327494 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:35:17 crc kubenswrapper[4943]: E1129 06:35:17.327627 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.327766 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:35:17 crc kubenswrapper[4943]: E1129 06:35:17.327796 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:35:17 crc kubenswrapper[4943]: E1129 06:35:17.328074 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.390587 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.390640 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.390652 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.390670 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.390683 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.492843 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.492883 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.492895 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.492912 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.492925 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.594645 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.594686 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.594694 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.594709 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.594721 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.697181 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.697406 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.697732 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.697885 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.697956 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.800338 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.800403 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.800422 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.800445 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.800462 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.902760 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.902796 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.902805 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.902820 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:17 crc kubenswrapper[4943]: I1129 06:35:17.902829 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:17Z","lastTransitionTime":"2025-11-29T06:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.005685 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.005766 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.005793 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.005825 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.005851 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.109163 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.109198 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.109207 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.109222 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.109232 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.211880 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.211909 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.211917 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.211929 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.211937 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.315266 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.315318 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.315334 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.315356 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.315370 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.418237 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.418274 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.418284 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.418298 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.418307 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.521622 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.521659 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.521668 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.521683 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.521692 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.624463 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.624498 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.624506 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.624522 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.624530 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.727612 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.727677 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.727689 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.727705 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.727716 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.829586 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.829630 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.829641 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.829657 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.829668 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.932478 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.932526 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.932537 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.932554 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:18 crc kubenswrapper[4943]: I1129 06:35:18.932594 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:18Z","lastTransitionTime":"2025-11-29T06:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.034782 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.034816 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.034824 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.034835 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
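Every setters.go:603 entry in this run embeds the same v1 Ready condition as JSON; only the heartbeat and transition timestamps advance, at roughly 100 ms intervals, until the CNI config appears. A small sketch decoding that JSON follows; the struct simply mirrors the fields visible in the log line rather than importing k8s.io/api, so treat it as illustrative.

// A minimal sketch decoding the node condition JSON logged above.
package main

import (
	"encoding/json"
	"fmt"
)

// nodeCondition mirrors the fields in the logged condition={...} payload.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Shortened copy of one logged condition (message truncated here).
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	// The node counts as NotReady whenever Ready's status is not "True".
	fmt.Printf("node ready: %v (reason: %s)\n", c.Status == "True", c.Reason)
}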
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.034844 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.137263 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.137305 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.137315 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.137329 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.137339 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.239082 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.239140 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.239164 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.239177 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.239186 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.326844 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 06:35:19 crc kubenswrapper[4943]: E1129 06:35:19.326984 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.327915 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.328062 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.328168 4943 scope.go:117] "RemoveContainer" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5"
Nov 29 06:35:19 crc kubenswrapper[4943]: E1129 06:35:19.328352 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.328098 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:35:19 crc kubenswrapper[4943]: E1129 06:35:19.328457 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 06:35:19 crc kubenswrapper[4943]: E1129 06:35:19.328549 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b"
Nov 29 06:35:19 crc kubenswrapper[4943]: E1129 06:35:19.328639 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.341430 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.341655 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.341746 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.341822 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
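The "back-off 40s restarting failed container" entry above is kubelet's CrashLoopBackOff for the ovnkube-controller container, which is itself why the CNI config never lands. To my knowledge kubelet's restart backoff starts at 10s and doubles per consecutive crash, capped at 5m, so an observed 40s delay corresponds to the third failure in a row; the sketch below is an illustrative reconstruction of that schedule, not kubelet's actual backoff code.

// crashloop.go: hypothetical sketch of the CrashLoopBackOff delay schedule.
package main

import (
	"fmt"
	"time"
)

// crashLoopDelay returns the assumed restart delay after `restarts`
// consecutive failures: 10s base, doubled each time, capped at 5m.
func crashLoopDelay(restarts int) time.Duration {
	d := 10 * time.Second
	for i := 0; i < restarts; i++ {
		d *= 2
		if d > 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for r := 0; r <= 5; r++ {
		// restart 2 prints "back-off 40s", matching the log line above.
		fmt.Printf("restart %d -> back-off %s\n", r, crashLoopDelay(r))
	}
}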
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.342029 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.444401 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.444772 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.444876 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.444977 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.445054 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.547625 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.547656 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.547664 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.547678 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.547691 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.649515 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.649589 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.649603 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.649617 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.649629 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.752620 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.752648 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.752658 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.752674 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.752686 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.854780 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.854820 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.854828 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.854843 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.854854 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.958016 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.958079 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.958102 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.958127 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:19 crc kubenswrapper[4943]: I1129 06:35:19.958144 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:19Z","lastTransitionTime":"2025-11-29T06:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.061594 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.061661 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.061676 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.061701 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.061717 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:20Z","lastTransitionTime":"2025-11-29T06:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.110064 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.110134 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.110155 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.110186 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.110209 4943 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T06:35:20Z","lastTransitionTime":"2025-11-29T06:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.168247 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"]
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.168613 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.170656 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.170812 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.170849 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.171924 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.185978 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-drzxp" podStartSLOduration=75.185958445 podStartE2EDuration="1m15.185958445s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.185806741 +0000 UTC m=+95.115895534" watchObservedRunningTime="2025-11-29 06:35:20.185958445 +0000 UTC m=+95.116047208"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.207090 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-svhjp" podStartSLOduration=74.207068796 podStartE2EDuration="1m14.207068796s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.205393262 +0000 UTC m=+95.135482025" watchObservedRunningTime="2025-11-29 06:35:20.207068796 +0000 UTC m=+95.137157549"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.214343 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5279ca36-96a9-41b3-980c-8366fb18feef-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.214401 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/5279ca36-96a9-41b3-980c-8366fb18feef-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.214477 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5279ca36-96a9-41b3-980c-8366fb18feef-service-ca\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
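The reflector.go:368 "Caches populated" entries mark the kubelet's per-namespace reflectors finishing their initial list+watch of the ConfigMaps and Secrets the new CVO pod mounts. The sketch below shows the standard client-go pattern that produces this state; it is a hedged illustration, not kubelet's internal code, and the kubeconfig path is a placeholder assumption.

// reflector.go sketch: wait for a ConfigMap informer cache to sync,
// the moment at which "Caches populated" would be logged.
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; adjust for your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	cmInformer := factory.Core().V1().ConfigMaps().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	// Blocks until the reflector has listed and begun watching ConfigMaps.
	if !cache.WaitForCacheSync(stop, cmInformer.HasSynced) {
		panic("cache never synced")
	}
	fmt.Println("ConfigMap cache populated")
}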
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.214510 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5279ca36-96a9-41b3-980c-8366fb18feef-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.214532 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5279ca36-96a9-41b3-980c-8366fb18feef-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.235003 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=75.234989819 podStartE2EDuration="1m15.234989819s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.221380207 +0000 UTC m=+95.151468960" watchObservedRunningTime="2025-11-29 06:35:20.234989819 +0000 UTC m=+95.165078572"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.258574 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=40.258540305 podStartE2EDuration="40.258540305s" podCreationTimestamp="2025-11-29 06:34:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.257526698 +0000 UTC m=+95.187615451" watchObservedRunningTime="2025-11-29 06:35:20.258540305 +0000 UTC m=+95.188629058"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.307229 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podStartSLOduration=75.30721131 podStartE2EDuration="1m15.30721131s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.291847152 +0000 UTC m=+95.221935915" watchObservedRunningTime="2025-11-29 06:35:20.30721131 +0000 UTC m=+95.237300053"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.307419 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-ptcqh" podStartSLOduration=75.307415285 podStartE2EDuration="1m15.307415285s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.306996794 +0000 UTC m=+95.237085557" watchObservedRunningTime="2025-11-29 06:35:20.307415285 +0000 UTC m=+95.237504038"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.315038 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5279ca36-96a9-41b3-980c-8366fb18feef-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
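In these pod_startup_latency_tracker entries the pull timestamps are zero (no image pull happened), so podStartSLOduration works out to watchObservedRunningTime minus podCreationTimestamp, which also matches podStartE2EDuration; the "m=+95.1..." suffix is Go's monotonic clock reading, i.e. seconds since the kubelet process started (~06:33:45, consistent with the service start at the top of this log). A small sketch of that arithmetic, using values copied from the node-resolver-drzxp entry above:

// slo.go: reproduce podStartE2EDuration from the logged timestamps.
package main

import (
	"fmt"
	"time"
)

func main() {
	// podCreationTimestamp and watchObservedRunningTime from the log entry.
	created, _ := time.Parse("2006-01-02 15:04:05 -0700 MST",
		"2025-11-29 06:34:05 +0000 UTC")
	observed, _ := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST",
		"2025-11-29 06:35:20.185958445 +0000 UTC")
	// Prints 1m15.185958445s, the logged podStartE2EDuration.
	fmt.Println(observed.Sub(created))
}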
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.315097 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/5279ca36-96a9-41b3-980c-8366fb18feef-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.315143 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5279ca36-96a9-41b3-980c-8366fb18feef-service-ca\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.315171 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5279ca36-96a9-41b3-980c-8366fb18feef-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.315193 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5279ca36-96a9-41b3-980c-8366fb18feef-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.315263 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5279ca36-96a9-41b3-980c-8366fb18feef-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.315299 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/5279ca36-96a9-41b3-980c-8366fb18feef-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.316044 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5279ca36-96a9-41b3-980c-8366fb18feef-service-ca\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.322263 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5279ca36-96a9-41b3-980c-8366fb18feef-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.329953 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5279ca36-96a9-41b3-980c-8366fb18feef-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-sqzxs\" (UID: \"5279ca36-96a9-41b3-980c-8366fb18feef\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.357029 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=73.357010794 podStartE2EDuration="1m13.357010794s" podCreationTimestamp="2025-11-29 06:34:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.34142239 +0000 UTC m=+95.271511143" watchObservedRunningTime="2025-11-29 06:35:20.357010794 +0000 UTC m=+95.287099557"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.357485 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=16.357479526 podStartE2EDuration="16.357479526s" podCreationTimestamp="2025-11-29 06:35:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.35685347 +0000 UTC m=+95.286942223" watchObservedRunningTime="2025-11-29 06:35:20.357479526 +0000 UTC m=+95.287568279"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.371612 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=72.371596662 podStartE2EDuration="1m12.371596662s" podCreationTimestamp="2025-11-29 06:34:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.37114061 +0000 UTC m=+95.301229363" watchObservedRunningTime="2025-11-29 06:35:20.371596662 +0000 UTC m=+95.301685425"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.392103 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-fm4js" podStartSLOduration=75.392087217 podStartE2EDuration="1m15.392087217s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.380623032 +0000 UTC m=+95.310711795" watchObservedRunningTime="2025-11-29 06:35:20.392087217 +0000 UTC m=+95.322175970"
Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.429308 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-kh8qv" podStartSLOduration=75.429289707 podStartE2EDuration="1m15.429289707s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.428782063 +0000 UTC m=+95.358870836" watchObservedRunningTime="2025-11-29 06:35:20.429289707 +0000 UTC m=+95.359378460"
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs" Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.819999 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs" event={"ID":"5279ca36-96a9-41b3-980c-8366fb18feef","Type":"ContainerStarted","Data":"31a913a72f0ebb9d936d5a621263aaea58abea2db7522d8aab4f978ac0e69dc8"} Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.820079 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs" event={"ID":"5279ca36-96a9-41b3-980c-8366fb18feef","Type":"ContainerStarted","Data":"795cdda23d73c7a902ababffac20d240ce97b9cdae1df47d789589c367f5913b"} Nov 29 06:35:20 crc kubenswrapper[4943]: I1129 06:35:20.831949 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-sqzxs" podStartSLOduration=75.831932336 podStartE2EDuration="1m15.831932336s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:20.831658849 +0000 UTC m=+95.761747612" watchObservedRunningTime="2025-11-29 06:35:20.831932336 +0000 UTC m=+95.762021089" Nov 29 06:35:21 crc kubenswrapper[4943]: I1129 06:35:21.326932 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:21 crc kubenswrapper[4943]: I1129 06:35:21.326966 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:21 crc kubenswrapper[4943]: I1129 06:35:21.326992 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:21 crc kubenswrapper[4943]: E1129 06:35:21.327064 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:21 crc kubenswrapper[4943]: E1129 06:35:21.327153 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:21 crc kubenswrapper[4943]: E1129 06:35:21.327337 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:21 crc kubenswrapper[4943]: I1129 06:35:21.327369 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:21 crc kubenswrapper[4943]: E1129 06:35:21.327523 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:23 crc kubenswrapper[4943]: I1129 06:35:23.327191 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:23 crc kubenswrapper[4943]: I1129 06:35:23.327258 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:23 crc kubenswrapper[4943]: E1129 06:35:23.328484 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:23 crc kubenswrapper[4943]: I1129 06:35:23.328099 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:23 crc kubenswrapper[4943]: I1129 06:35:23.327263 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:23 crc kubenswrapper[4943]: E1129 06:35:23.328708 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:23 crc kubenswrapper[4943]: E1129 06:35:23.328824 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:23 crc kubenswrapper[4943]: E1129 06:35:23.328338 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:23 crc kubenswrapper[4943]: I1129 06:35:23.655091 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:23 crc kubenswrapper[4943]: E1129 06:35:23.655247 4943 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:35:23 crc kubenswrapper[4943]: E1129 06:35:23.655327 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs podName:b7b0785d-0c62-4fef-83aa-a9d32e9d388b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:27.655306541 +0000 UTC m=+162.585395294 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs") pod "network-metrics-daemon-4wgtt" (UID: "b7b0785d-0c62-4fef-83aa-a9d32e9d388b") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 06:35:25 crc kubenswrapper[4943]: I1129 06:35:25.326858 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:25 crc kubenswrapper[4943]: I1129 06:35:25.326872 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:25 crc kubenswrapper[4943]: I1129 06:35:25.328200 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:25 crc kubenswrapper[4943]: E1129 06:35:25.329700 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:25 crc kubenswrapper[4943]: I1129 06:35:25.330027 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:25 crc kubenswrapper[4943]: E1129 06:35:25.330167 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:25 crc kubenswrapper[4943]: E1129 06:35:25.330382 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:25 crc kubenswrapper[4943]: E1129 06:35:25.330601 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:27 crc kubenswrapper[4943]: I1129 06:35:27.327008 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:27 crc kubenswrapper[4943]: I1129 06:35:27.327011 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:27 crc kubenswrapper[4943]: E1129 06:35:27.327862 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:27 crc kubenswrapper[4943]: I1129 06:35:27.327239 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:27 crc kubenswrapper[4943]: I1129 06:35:27.327037 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:27 crc kubenswrapper[4943]: E1129 06:35:27.327956 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:27 crc kubenswrapper[4943]: E1129 06:35:27.327995 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:27 crc kubenswrapper[4943]: E1129 06:35:27.328165 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:29 crc kubenswrapper[4943]: I1129 06:35:29.327153 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:29 crc kubenswrapper[4943]: I1129 06:35:29.327221 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:29 crc kubenswrapper[4943]: I1129 06:35:29.327257 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:29 crc kubenswrapper[4943]: I1129 06:35:29.327301 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:29 crc kubenswrapper[4943]: E1129 06:35:29.327302 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:29 crc kubenswrapper[4943]: E1129 06:35:29.327425 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:29 crc kubenswrapper[4943]: E1129 06:35:29.327467 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:29 crc kubenswrapper[4943]: E1129 06:35:29.327520 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:30 crc kubenswrapper[4943]: I1129 06:35:30.328215 4943 scope.go:117] "RemoveContainer" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" Nov 29 06:35:30 crc kubenswrapper[4943]: E1129 06:35:30.328523 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" Nov 29 06:35:31 crc kubenswrapper[4943]: I1129 06:35:31.326874 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:31 crc kubenswrapper[4943]: I1129 06:35:31.327377 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:31 crc kubenswrapper[4943]: I1129 06:35:31.327396 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:31 crc kubenswrapper[4943]: E1129 06:35:31.327519 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:31 crc kubenswrapper[4943]: I1129 06:35:31.327628 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:31 crc kubenswrapper[4943]: E1129 06:35:31.327669 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:31 crc kubenswrapper[4943]: E1129 06:35:31.327744 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:31 crc kubenswrapper[4943]: E1129 06:35:31.327945 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:33 crc kubenswrapper[4943]: I1129 06:35:33.326738 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:33 crc kubenswrapper[4943]: I1129 06:35:33.326800 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:33 crc kubenswrapper[4943]: I1129 06:35:33.326832 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:33 crc kubenswrapper[4943]: E1129 06:35:33.326961 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:33 crc kubenswrapper[4943]: E1129 06:35:33.327284 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:33 crc kubenswrapper[4943]: E1129 06:35:33.327503 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:33 crc kubenswrapper[4943]: I1129 06:35:33.327717 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:33 crc kubenswrapper[4943]: E1129 06:35:33.327820 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:35 crc kubenswrapper[4943]: I1129 06:35:35.326603 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:35 crc kubenswrapper[4943]: I1129 06:35:35.326641 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:35 crc kubenswrapper[4943]: I1129 06:35:35.326685 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:35 crc kubenswrapper[4943]: E1129 06:35:35.328103 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:35 crc kubenswrapper[4943]: I1129 06:35:35.328134 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:35 crc kubenswrapper[4943]: E1129 06:35:35.328243 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:35 crc kubenswrapper[4943]: E1129 06:35:35.328309 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:35 crc kubenswrapper[4943]: E1129 06:35:35.328341 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:37 crc kubenswrapper[4943]: I1129 06:35:37.326519 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:37 crc kubenswrapper[4943]: I1129 06:35:37.326687 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:37 crc kubenswrapper[4943]: I1129 06:35:37.326605 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:37 crc kubenswrapper[4943]: E1129 06:35:37.326747 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:37 crc kubenswrapper[4943]: I1129 06:35:37.326938 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:37 crc kubenswrapper[4943]: E1129 06:35:37.327019 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:37 crc kubenswrapper[4943]: E1129 06:35:37.327115 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:37 crc kubenswrapper[4943]: E1129 06:35:37.327320 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.326706 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.326800 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.326741 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:39 crc kubenswrapper[4943]: E1129 06:35:39.326966 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:39 crc kubenswrapper[4943]: E1129 06:35:39.326847 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:39 crc kubenswrapper[4943]: E1129 06:35:39.326895 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.326722 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:39 crc kubenswrapper[4943]: E1129 06:35:39.327086 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.883383 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/1.log" Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.883900 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/0.log" Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.883951 4943 generic.go:334] "Generic (PLEG): container finished" podID="ca406df5-4c80-44b5-9092-4ff17b0b0c72" containerID="02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a" exitCode=1 Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.883977 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerDied","Data":"02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a"} Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.884008 4943 scope.go:117] "RemoveContainer" containerID="9f9ae7a05452fe7dbc58a0f4328e2957eb2a54324c57b1680077a415940a8a69" Nov 29 06:35:39 crc kubenswrapper[4943]: I1129 06:35:39.884439 4943 scope.go:117] "RemoveContainer" containerID="02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a" Nov 29 06:35:39 crc kubenswrapper[4943]: E1129 06:35:39.884684 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-kh8qv_openshift-multus(ca406df5-4c80-44b5-9092-4ff17b0b0c72)\"" pod="openshift-multus/multus-kh8qv" podUID="ca406df5-4c80-44b5-9092-4ff17b0b0c72" Nov 29 06:35:40 crc kubenswrapper[4943]: I1129 06:35:40.891135 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/1.log" Nov 29 06:35:41 crc kubenswrapper[4943]: I1129 06:35:41.326701 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:41 crc kubenswrapper[4943]: E1129 06:35:41.327179 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:41 crc kubenswrapper[4943]: I1129 06:35:41.327411 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:41 crc kubenswrapper[4943]: I1129 06:35:41.327447 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:41 crc kubenswrapper[4943]: I1129 06:35:41.327473 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:41 crc kubenswrapper[4943]: E1129 06:35:41.327532 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:41 crc kubenswrapper[4943]: E1129 06:35:41.327859 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:41 crc kubenswrapper[4943]: E1129 06:35:41.328211 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:41 crc kubenswrapper[4943]: I1129 06:35:41.328513 4943 scope.go:117] "RemoveContainer" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" Nov 29 06:35:41 crc kubenswrapper[4943]: E1129 06:35:41.328700 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lrsts_openshift-ovn-kubernetes(78ac9747-c331-4c4f-af69-5153d05f4097)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" Nov 29 06:35:43 crc kubenswrapper[4943]: I1129 06:35:43.326914 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:43 crc kubenswrapper[4943]: I1129 06:35:43.326995 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:43 crc kubenswrapper[4943]: I1129 06:35:43.327037 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:43 crc kubenswrapper[4943]: E1129 06:35:43.327160 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:43 crc kubenswrapper[4943]: I1129 06:35:43.327216 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:43 crc kubenswrapper[4943]: E1129 06:35:43.327299 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:43 crc kubenswrapper[4943]: E1129 06:35:43.327445 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:43 crc kubenswrapper[4943]: E1129 06:35:43.327521 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:45 crc kubenswrapper[4943]: I1129 06:35:45.326789 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:45 crc kubenswrapper[4943]: I1129 06:35:45.326825 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:45 crc kubenswrapper[4943]: I1129 06:35:45.326879 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:45 crc kubenswrapper[4943]: I1129 06:35:45.326789 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:45 crc kubenswrapper[4943]: E1129 06:35:45.328209 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:45 crc kubenswrapper[4943]: E1129 06:35:45.328395 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:45 crc kubenswrapper[4943]: E1129 06:35:45.328555 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:45 crc kubenswrapper[4943]: E1129 06:35:45.328507 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:45 crc kubenswrapper[4943]: E1129 06:35:45.353662 4943 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 29 06:35:45 crc kubenswrapper[4943]: E1129 06:35:45.533551 4943 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 29 06:35:47 crc kubenswrapper[4943]: I1129 06:35:47.326988 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:47 crc kubenswrapper[4943]: I1129 06:35:47.327059 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:47 crc kubenswrapper[4943]: E1129 06:35:47.328132 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:47 crc kubenswrapper[4943]: I1129 06:35:47.327073 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:47 crc kubenswrapper[4943]: I1129 06:35:47.327108 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:47 crc kubenswrapper[4943]: E1129 06:35:47.328332 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:47 crc kubenswrapper[4943]: E1129 06:35:47.328674 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:47 crc kubenswrapper[4943]: E1129 06:35:47.328836 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:49 crc kubenswrapper[4943]: I1129 06:35:49.327199 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:49 crc kubenswrapper[4943]: I1129 06:35:49.327232 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:49 crc kubenswrapper[4943]: I1129 06:35:49.327200 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:49 crc kubenswrapper[4943]: I1129 06:35:49.327285 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:49 crc kubenswrapper[4943]: E1129 06:35:49.327330 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:49 crc kubenswrapper[4943]: E1129 06:35:49.327414 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:49 crc kubenswrapper[4943]: E1129 06:35:49.327503 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:49 crc kubenswrapper[4943]: E1129 06:35:49.327647 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:50 crc kubenswrapper[4943]: E1129 06:35:50.535309 4943 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 29 06:35:51 crc kubenswrapper[4943]: I1129 06:35:51.326981 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:51 crc kubenswrapper[4943]: I1129 06:35:51.327053 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:51 crc kubenswrapper[4943]: I1129 06:35:51.327135 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:51 crc kubenswrapper[4943]: E1129 06:35:51.327169 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:51 crc kubenswrapper[4943]: E1129 06:35:51.327254 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:51 crc kubenswrapper[4943]: E1129 06:35:51.327351 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:51 crc kubenswrapper[4943]: I1129 06:35:51.327511 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:51 crc kubenswrapper[4943]: E1129 06:35:51.327679 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:53 crc kubenswrapper[4943]: I1129 06:35:53.327242 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:53 crc kubenswrapper[4943]: I1129 06:35:53.327283 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:53 crc kubenswrapper[4943]: I1129 06:35:53.327316 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:53 crc kubenswrapper[4943]: E1129 06:35:53.327391 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:53 crc kubenswrapper[4943]: I1129 06:35:53.327466 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:53 crc kubenswrapper[4943]: E1129 06:35:53.327751 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:53 crc kubenswrapper[4943]: I1129 06:35:53.327849 4943 scope.go:117] "RemoveContainer" containerID="02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a" Nov 29 06:35:53 crc kubenswrapper[4943]: E1129 06:35:53.327977 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:53 crc kubenswrapper[4943]: E1129 06:35:53.328738 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:53 crc kubenswrapper[4943]: I1129 06:35:53.933108 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/1.log" Nov 29 06:35:53 crc kubenswrapper[4943]: I1129 06:35:53.933471 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerStarted","Data":"5eaaa6b16990ee0172d1ad8e17aa360746d2ac18384845a0a4717f2a0f9fce7d"} Nov 29 06:35:55 crc kubenswrapper[4943]: I1129 06:35:55.327312 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:55 crc kubenswrapper[4943]: I1129 06:35:55.327357 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:55 crc kubenswrapper[4943]: E1129 06:35:55.327461 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:55 crc kubenswrapper[4943]: I1129 06:35:55.327501 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:55 crc kubenswrapper[4943]: I1129 06:35:55.327493 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:55 crc kubenswrapper[4943]: E1129 06:35:55.328895 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:55 crc kubenswrapper[4943]: E1129 06:35:55.329015 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:55 crc kubenswrapper[4943]: E1129 06:35:55.329088 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:55 crc kubenswrapper[4943]: E1129 06:35:55.536031 4943 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 29 06:35:56 crc kubenswrapper[4943]: I1129 06:35:56.327928 4943 scope.go:117] "RemoveContainer" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" Nov 29 06:35:56 crc kubenswrapper[4943]: I1129 06:35:56.945890 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/3.log" Nov 29 06:35:56 crc kubenswrapper[4943]: I1129 06:35:56.948238 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerStarted","Data":"f91c2b38743f6430b94e6c36188aa34280ee66f6688f3f0dc8fdce3c572704c2"} Nov 29 06:35:56 crc kubenswrapper[4943]: I1129 06:35:56.948705 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:35:56 crc kubenswrapper[4943]: I1129 06:35:56.977388 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podStartSLOduration=110.977371926 podStartE2EDuration="1m50.977371926s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:35:56.975864186 +0000 UTC m=+131.905952959" watchObservedRunningTime="2025-11-29 06:35:56.977371926 +0000 UTC m=+131.907460679" Nov 29 06:35:57 crc kubenswrapper[4943]: I1129 06:35:57.022001 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4wgtt"] Nov 29 06:35:57 crc kubenswrapper[4943]: I1129 06:35:57.022137 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:57 crc kubenswrapper[4943]: E1129 06:35:57.022241 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:35:57 crc kubenswrapper[4943]: I1129 06:35:57.327372 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:57 crc kubenswrapper[4943]: I1129 06:35:57.327372 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:57 crc kubenswrapper[4943]: E1129 06:35:57.327609 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:57 crc kubenswrapper[4943]: E1129 06:35:57.327701 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:57 crc kubenswrapper[4943]: I1129 06:35:57.327889 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:57 crc kubenswrapper[4943]: E1129 06:35:57.327973 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:59 crc kubenswrapper[4943]: I1129 06:35:59.327065 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:35:59 crc kubenswrapper[4943]: I1129 06:35:59.327117 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:35:59 crc kubenswrapper[4943]: I1129 06:35:59.327129 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:35:59 crc kubenswrapper[4943]: E1129 06:35:59.327180 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 06:35:59 crc kubenswrapper[4943]: I1129 06:35:59.327081 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:35:59 crc kubenswrapper[4943]: E1129 06:35:59.327299 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 06:35:59 crc kubenswrapper[4943]: E1129 06:35:59.327348 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 06:35:59 crc kubenswrapper[4943]: E1129 06:35:59.327423 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4wgtt" podUID="b7b0785d-0c62-4fef-83aa-a9d32e9d388b" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.563698 4943 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.610298 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hkg6f"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.611158 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.611687 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-z2hf5"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.612188 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.612216 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.612713 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.614238 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.614682 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.617022 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.617384 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.617633 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.617999 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.618378 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.618737 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.619016 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.619251 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.619525 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.619708 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.619762 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-sdcdn"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.620069 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.620280 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.620334 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.622365 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.622670 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.623265 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-t7n65"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.623752 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-8wxhh"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.624067 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.624398 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.624671 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.625143 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.626935 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-jxf26"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.627442 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-v52hz"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.627860 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.628017 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.628490 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-v52hz" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.628529 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.634649 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vm9wf"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.635212 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-prf24"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.635266 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.635815 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.646255 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.650016 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.650419 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.650448 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.651244 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.652739 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.654486 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.654958 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z2b2d"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.674259 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.674632 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:00 crc kubenswrapper[4943]: W1129 06:36:00.675292 4943 reflector.go:561] object-"openshift-image-registry"/"image-registry-operator-tls": failed to list *v1.Secret: secrets "image-registry-operator-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Nov 29 06:36:00 crc kubenswrapper[4943]: E1129 06:36:00.675330 4943 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"image-registry-operator-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"image-registry-operator-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675298 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675356 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675401 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: W1129 06:36:00.675536 4943 reflector.go:561] object-"openshift-image-registry"/"trusted-ca": failed to list *v1.ConfigMap: configmaps "trusted-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Nov 29 06:36:00 crc kubenswrapper[4943]: E1129 06:36:00.675556 4943 reflector.go:158] "Unhandled Error" err="object-\"openshift-image-registry\"/\"trusted-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675648 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675670 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675740 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 29 06:36:00 crc kubenswrapper[4943]: W1129 06:36:00.675825 4943 reflector.go:561] object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx": failed to list *v1.Secret: secrets "cluster-image-registry-operator-dockercfg-m4qtx" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-image-registry": no relationship found between node 'crc' and this object Nov 29 06:36:00 crc kubenswrapper[4943]: E1129 06:36:00.675841 4943 reflector.go:158] "Unhandled Error" 
err="object-\"openshift-image-registry\"/\"cluster-image-registry-operator-dockercfg-m4qtx\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cluster-image-registry-operator-dockercfg-m4qtx\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-image-registry\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675828 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675950 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676028 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676045 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676155 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676309 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676381 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676407 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676816 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676911 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.676978 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677043 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677065 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677071 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677137 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.675950 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677420 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 
06:36:00.677531 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677569 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677775 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677843 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677899 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.677936 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678004 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678050 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678109 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678137 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678209 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678318 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678384 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678488 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678602 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678689 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678703 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.678805 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.679036 4943 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.679256 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.679393 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.679519 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.679611 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.680148 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.680394 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.680194 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.681328 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.681982 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-cqbvm"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.682235 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.682317 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.682733 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.682980 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.690942 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.691065 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.691119 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.691203 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.691237 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.693992 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.694747 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.698446 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-x5kzr"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.700401 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.700760 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.700873 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.703097 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.703489 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.703775 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.704542 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.704708 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.724501 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.724942 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.729868 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.731183 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.731345 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.731238 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.731263 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.752295 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.752568 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.752775 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.752848 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753025 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753505 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8405beb0-21cf-44f5-9979-3488c214d762-trusted-ca\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753539 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/afc2af4e-e064-41fd-8fcf-e184be168a9a-images\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753589 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8scvd\" (UniqueName: \"kubernetes.io/projected/a1dadabe-2740-4883-81c1-ea20746772f9-kube-api-access-8scvd\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753515 4943 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753618 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8405beb0-21cf-44f5-9979-3488c214d762-bound-sa-token\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753640 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/afc2af4e-e064-41fd-8fcf-e184be168a9a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753770 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753820 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753843 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8405beb0-21cf-44f5-9979-3488c214d762-metrics-tls\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753860 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45rn6\" (UniqueName: \"kubernetes.io/projected/99933cfb-96e8-4fc3-a9aa-291b306760f6-kube-api-access-45rn6\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.753972 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-service-ca-bundle\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754025 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754068 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hqgb\" (UniqueName: \"kubernetes.io/projected/afc2af4e-e064-41fd-8fcf-e184be168a9a-kube-api-access-9hqgb\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754122 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/99933cfb-96e8-4fc3-a9aa-291b306760f6-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754127 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754167 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1dadabe-2740-4883-81c1-ea20746772f9-serving-cert\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754268 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afc2af4e-e064-41fd-8fcf-e184be168a9a-config\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754291 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-config\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754309 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.754329 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgfc7\" (UniqueName: \"kubernetes.io/projected/8405beb0-21cf-44f5-9979-3488c214d762-kube-api-access-bgfc7\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.755300 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.755586 4943 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-48qmb"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.756224 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.756822 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.757127 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.757784 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.757921 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.758246 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.758680 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.759192 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.759792 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.763040 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.763758 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.764540 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.765044 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.766203 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.766488 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-h4mrx"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.766962 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.767125 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.768347 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.768869 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.772219 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.772446 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.772754 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.773274 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.773845 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.773882 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.776633 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.777018 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxzdk"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.777359 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.777614 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.777621 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.777615 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.777981 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.778184 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.778209 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.778522 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.778883 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.780801 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vdxv4"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.781398 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.781581 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.781840 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-z2hf5"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.781909 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.783941 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.784221 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hkg6f"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.785238 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-t7n65"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.787854 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.789771 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.790177 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.796799 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-v52hz"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.796929 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.800099 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.803679 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.805309 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-8wxhh"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.806777 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z2b2d"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.808329 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.808352 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-prf24"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.809612 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-jxf26"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.810948 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-2v9bf"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.811813 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.812022 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.813417 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-48qmb"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.814187 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.814220 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.815738 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.816320 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.817257 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-fzrkw"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.818995 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-4txpk"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.819457 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.820652 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.820933 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.821739 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.822556 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-sdcdn"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.823943 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.825081 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vm9wf"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.826798 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-cqbvm"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.827651 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vdxv4"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.830928 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.830951 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-4txpk"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.831704 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.833058 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.833165 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-x5kzr"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.834114 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.835355 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-fzrkw"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.836482 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.840326 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.841894 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.842941 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxzdk"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.844565 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.845826 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-m2skw"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.847011 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-m2skw"] Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.847077 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-m2skw" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.854077 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.854870 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8405beb0-21cf-44f5-9979-3488c214d762-bound-sa-token\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.854959 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/afc2af4e-e064-41fd-8fcf-e184be168a9a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855027 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855232 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855289 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8405beb0-21cf-44f5-9979-3488c214d762-metrics-tls\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855315 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45rn6\" (UniqueName: \"kubernetes.io/projected/99933cfb-96e8-4fc3-a9aa-291b306760f6-kube-api-access-45rn6\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855369 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-service-ca-bundle\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855394 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hqgb\" (UniqueName: 
\"kubernetes.io/projected/afc2af4e-e064-41fd-8fcf-e184be168a9a-kube-api-access-9hqgb\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855409 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/99933cfb-96e8-4fc3-a9aa-291b306760f6-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855435 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1dadabe-2740-4883-81c1-ea20746772f9-serving-cert\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855454 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afc2af4e-e064-41fd-8fcf-e184be168a9a-config\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855469 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-config\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855484 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855502 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgfc7\" (UniqueName: \"kubernetes.io/projected/8405beb0-21cf-44f5-9979-3488c214d762-kube-api-access-bgfc7\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855529 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8405beb0-21cf-44f5-9979-3488c214d762-trusted-ca\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855557 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/afc2af4e-e064-41fd-8fcf-e184be168a9a-images\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.855601 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8scvd\" (UniqueName: \"kubernetes.io/projected/a1dadabe-2740-4883-81c1-ea20746772f9-kube-api-access-8scvd\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.856315 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.856552 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-service-ca-bundle\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.856895 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1dadabe-2740-4883-81c1-ea20746772f9-config\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.857016 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/afc2af4e-e064-41fd-8fcf-e184be168a9a-images\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.857062 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afc2af4e-e064-41fd-8fcf-e184be168a9a-config\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.857369 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8405beb0-21cf-44f5-9979-3488c214d762-trusted-ca\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.860056 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/afc2af4e-e064-41fd-8fcf-e184be168a9a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.860386 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1dadabe-2740-4883-81c1-ea20746772f9-serving-cert\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.861230 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8405beb0-21cf-44f5-9979-3488c214d762-metrics-tls\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.873405 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.893644 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.912967 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.933093 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.954069 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.974124 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 29 06:36:00 crc kubenswrapper[4943]: I1129 06:36:00.993535 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.013142 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.033003 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.054593 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.073647 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.113960 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.133122 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.153991 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157418 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-audit-policies\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157452 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwdw7\" (UniqueName: \"kubernetes.io/projected/dab93fb0-80fd-4bb3-aa09-e95434e7354b-kube-api-access-vwdw7\") pod \"downloads-7954f5f757-v52hz\" (UID: \"dab93fb0-80fd-4bb3-aa09-e95434e7354b\") " pod="openshift-console/downloads-7954f5f757-v52hz" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157473 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-oauth-config\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157493 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35d86102-0e39-4f45-b7b6-c41f9f5daf06-serving-cert\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157510 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157529 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp8n4\" (UniqueName: \"kubernetes.io/projected/f836e7f7-5926-4744-8a73-af83fedb06cd-kube-api-access-cp8n4\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157547 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157631 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-client-ca\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157674 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-serving-cert\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157700 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-etcd-client\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157729 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157757 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42mdf\" (UniqueName: \"kubernetes.io/projected/35d86102-0e39-4f45-b7b6-c41f9f5daf06-kube-api-access-42mdf\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157778 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ddf860a2-14da-473d-97e4-c5b2a28828c1-audit-dir\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157818 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8kng\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-kube-api-access-w8kng\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157861 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x49k\" (UniqueName: \"kubernetes.io/projected/b98074c8-0098-4d89-899a-948d21ef3500-kube-api-access-9x49k\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157898 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-ca-trust-extracted\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157924 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6ncq\" (UniqueName: 
\"kubernetes.io/projected/1dec43e3-8363-43ca-a96b-6127086f75db-kube-api-access-v6ncq\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157948 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-bound-sa-token\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157970 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwttv\" (UniqueName: \"kubernetes.io/projected/ddf860a2-14da-473d-97e4-c5b2a28828c1-kube-api-access-zwttv\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.157992 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158017 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b98074c8-0098-4d89-899a-948d21ef3500-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158044 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55bhh\" (UniqueName: \"kubernetes.io/projected/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-kube-api-access-55bhh\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158068 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-oauth-serving-cert\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158093 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158117 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-audit-dir\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: 
\"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158144 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-client-ca\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158177 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-installation-pull-secrets\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158201 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35d86102-0e39-4f45-b7b6-c41f9f5daf06-trusted-ca\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158226 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-encryption-config\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158246 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-console-config\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158268 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-encryption-config\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158306 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-config\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158352 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-tls\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 
06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158379 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158404 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-config\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158418 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-trusted-ca-bundle\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158439 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f836e7f7-5926-4744-8a73-af83fedb06cd-machine-approver-tls\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158455 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f836e7f7-5926-4744-8a73-af83fedb06cd-config\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158470 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-audit\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158486 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158521 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-serving-cert\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158551 
4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158644 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-config\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158671 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-serving-cert\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158694 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b98074c8-0098-4d89-899a-948d21ef3500-config\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158716 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158736 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-image-import-ca\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158759 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158783 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f836e7f7-5926-4744-8a73-af83fedb06cd-auth-proxy-config\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158804 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-452vx\" (UniqueName: \"kubernetes.io/projected/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-kube-api-access-452vx\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158833 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-serving-cert\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158871 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158894 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-serving-cert\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158913 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13d43ef8-ebda-4e16-8616-ac9697607054-audit-dir\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158951 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.158996 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-service-ca\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159032 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159059 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: 
\"kubernetes.io/host-path/ddf860a2-14da-473d-97e4-c5b2a28828c1-node-pullsecrets\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159082 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159107 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159142 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-audit-policies\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159164 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-etcd-client\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159185 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ghf9\" (UniqueName: \"kubernetes.io/projected/13d43ef8-ebda-4e16-8616-ac9697607054-kube-api-access-8ghf9\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159208 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159232 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159254 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-certificates\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159275 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35d86102-0e39-4f45-b7b6-c41f9f5daf06-config\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159297 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qff9\" (UniqueName: \"kubernetes.io/projected/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-kube-api-access-7qff9\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.159378 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-etcd-serving-ca\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.159414 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:01.659394795 +0000 UTC m=+136.589483678 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.173754 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.194372 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.212889 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.233707 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.252784 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260353 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.260430 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:01.760412727 +0000 UTC m=+136.690501480 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260486 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260523 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7f8d\" (UniqueName: \"kubernetes.io/projected/655ce663-9a0e-4ce9-bdaf-e614234ac533-kube-api-access-c7f8d\") pod \"control-plane-machine-set-operator-78cbb6b69f-7mcpf\" (UID: \"655ce663-9a0e-4ce9-bdaf-e614234ac533\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260553 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-serving-cert\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260583 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c5563de-5269-435a-8e95-168e43531454-config\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260599 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fpml\" (UniqueName: \"kubernetes.io/projected/bbc03c24-61f2-4009-9aa8-09a526122701-kube-api-access-6fpml\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260615 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d20623eb-726b-4684-a8f4-3358d6a1c7fd-apiservice-cert\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260632 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" Nov 29 06:36:01 
crc kubenswrapper[4943]: I1129 06:36:01.260660 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-registration-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260837 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/346fc9d4-63c9-4c45-833c-b45822a13b4b-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260895 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bbc03c24-61f2-4009-9aa8-09a526122701-signing-key\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260941 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.260977 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-metrics-tls\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261013 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-proxy-tls\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261111 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261200 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-audit-policies\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261244 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-serving-cert\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx"
Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.261282 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:01.761256871 +0000 UTC m=+136.691345614 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261496 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261544 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-certificates\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261587 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-etcd-serving-ca\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261618 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x2gg\" (UniqueName: \"kubernetes.io/projected/4af90184-bb3b-455d-a9dc-9e120c08b3c7-kube-api-access-8x2gg\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261641 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-certs\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261676 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261700 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-client\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261723 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a9c30534-72b1-4990-bd05-1b9e0e7677c6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261744 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vshf\" (UniqueName: \"kubernetes.io/projected/a9c30534-72b1-4990-bd05-1b9e0e7677c6-kube-api-access-8vshf\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261769 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp8n4\" (UniqueName: \"kubernetes.io/projected/f836e7f7-5926-4744-8a73-af83fedb06cd-kube-api-access-cp8n4\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261790 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/036ea3a8-bb27-4f18-9438-ed26ae51833c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261811 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c0469087-9878-428e-b24e-e1f31f6f8848-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261833 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-etcd-client\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261853 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261864 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-audit-policies\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261875 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf85r\" (UniqueName: \"kubernetes.io/projected/c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788-kube-api-access-wf85r\") pod \"package-server-manager-789f6589d5-6qnvs\" (UID: \"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261925 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261947 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cql88\" (UniqueName: \"kubernetes.io/projected/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-kube-api-access-cql88\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261967 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-config\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.261989 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42mdf\" (UniqueName: \"kubernetes.io/projected/35d86102-0e39-4f45-b7b6-c41f9f5daf06-kube-api-access-42mdf\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262005 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ddf860a2-14da-473d-97e4-c5b2a28828c1-audit-dir\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262023 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8kng\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-kube-api-access-w8kng\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262039 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x49k\" (UniqueName: \"kubernetes.io/projected/b98074c8-0098-4d89-899a-948d21ef3500-kube-api-access-9x49k\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262061 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6ncq\" (UniqueName: \"kubernetes.io/projected/1dec43e3-8363-43ca-a96b-6127086f75db-kube-api-access-v6ncq\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262078 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262095 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44zsr\" (UniqueName: \"kubernetes.io/projected/642bec88-869e-42da-baa9-47e98f95ba17-kube-api-access-44zsr\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262111 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-bound-sa-token\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262127 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f343b01f-4fea-4964-a4ac-3a6968e65967-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262143 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55bhh\" (UniqueName: \"kubernetes.io/projected/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-kube-api-access-55bhh\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262158 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262174 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-oauth-serving-cert\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262190 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0469087-9878-428e-b24e-e1f31f6f8848-config\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262204 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzm5m\" (UniqueName: \"kubernetes.io/projected/d20623eb-726b-4684-a8f4-3358d6a1c7fd-kube-api-access-xzm5m\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262221 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phfc5\" (UniqueName: \"kubernetes.io/projected/8584d24e-b420-4eca-8947-081c7d6c69c7-kube-api-access-phfc5\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262237 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g74p9\" (UniqueName: \"kubernetes.io/projected/036ea3a8-bb27-4f18-9438-ed26ae51833c-kube-api-access-g74p9\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262256 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ncfc\" (UniqueName: \"kubernetes.io/projected/346fc9d4-63c9-4c45-833c-b45822a13b4b-kube-api-access-5ncfc\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262279 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-installation-pull-secrets\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262294 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-stats-auth\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262311 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-encryption-config\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262328 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/cf402561-e803-41f4-9b0a-caf00944c023-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9shcm\" (UID: \"cf402561-e803-41f4-9b0a-caf00944c023\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262344 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7b841e1f-1dcf-48f3-97a1-88b401a0eee8-metrics-tls\") pod \"dns-operator-744455d44c-x5kzr\" (UID: \"7b841e1f-1dcf-48f3-97a1-88b401a0eee8\") " pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262360 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn6fg\" (UniqueName: \"kubernetes.io/projected/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-kube-api-access-jn6fg\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262377 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-tls\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262394 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-config-volume\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262414 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-config\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262430 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-trusted-ca-bundle\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262901 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-certificates\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.262949 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ddf860a2-14da-473d-97e4-c5b2a28828c1-audit-dir\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263045 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-config\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263073 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjlgr\" (UniqueName: \"kubernetes.io/projected/7b841e1f-1dcf-48f3-97a1-88b401a0eee8-kube-api-access-zjlgr\") pod \"dns-operator-744455d44c-x5kzr\" (UID: \"7b841e1f-1dcf-48f3-97a1-88b401a0eee8\") " pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263080 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263095 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-serving-cert\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263107 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-etcd-serving-ca\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263306 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f343b01f-4fea-4964-a4ac-3a6968e65967-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263330 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0469087-9878-428e-b24e-e1f31f6f8848-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263360 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4af90184-bb3b-455d-a9dc-9e120c08b3c7-secret-volume\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263382 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-config\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263398 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09b45167-2db2-48c1-8776-4edc3ecb9ffb-service-ca-bundle\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263413 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-mountpoint-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263428 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac9cc53-448a-40b4-929e-5d13a9ebb883-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263458 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-serving-cert\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263474 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b98074c8-0098-4d89-899a-948d21ef3500-config\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263492 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f722787-3b10-475b-a9ff-4c84d8de3d34-cert\") pod \"ingress-canary-m2skw\" (UID: \"8f722787-3b10-475b-a9ff-4c84d8de3d34\") " pod="openshift-ingress-canary/ingress-canary-m2skw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263508 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/655ce663-9a0e-4ce9-bdaf-e614234ac533-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-7mcpf\" (UID: \"655ce663-9a0e-4ce9-bdaf-e614234ac533\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263546 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c5563de-5269-435a-8e95-168e43531454-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263567 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qfrz\" (UniqueName: \"kubernetes.io/projected/09b45167-2db2-48c1-8776-4edc3ecb9ffb-kube-api-access-7qfrz\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.263610 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264403 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-config\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264661 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264683 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-452vx\" (UniqueName: \"kubernetes.io/projected/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-kube-api-access-452vx\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264728 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c016108a-e89e-4bf3-97b2-fb9344746be8-srv-cert\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264749 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-metrics-certs\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264769 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-serving-cert\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264808 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13d43ef8-ebda-4e16-8616-ac9697607054-audit-dir\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264826 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-plugins-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264841 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.264965 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmlrr\" (UniqueName: \"kubernetes.io/projected/b0fc5c3c-5c5c-4f45-8672-a097373973d0-kube-api-access-jmlrr\") pod \"multus-admission-controller-857f4d67dd-48qmb\" (UID: \"b0fc5c3c-5c5c-4f45-8672-a097373973d0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265004 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bbc03c24-61f2-4009-9aa8-09a526122701-signing-cabundle\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265092 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-service-ca\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265141 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8jdk\" (UniqueName: \"kubernetes.io/projected/69a44c9d-8342-4b54-a619-02abab39ecf6-kube-api-access-c8jdk\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265160 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/ddf860a2-14da-473d-97e4-c5b2a28828c1-node-pullsecrets\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265180 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69a44c9d-8342-4b54-a619-02abab39ecf6-serving-cert\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265197 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/346fc9d4-63c9-4c45-833c-b45822a13b4b-serving-cert\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265217 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265234 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-etcd-client\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265249 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-ca\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265265 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z46c\" (UniqueName: \"kubernetes.io/projected/2486e195-c853-4f92-a92b-8123812a01d5-kube-api-access-5z46c\") pod \"migrator-59844c95c7-6dflg\" (UID: \"2486e195-c853-4f92-a92b-8123812a01d5\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265280 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-default-certificate\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265296 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-csi-data-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265349 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-serving-cert\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265441 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265501 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ghf9\" (UniqueName: \"kubernetes.io/projected/13d43ef8-ebda-4e16-8616-ac9697607054-kube-api-access-8ghf9\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265537 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35d86102-0e39-4f45-b7b6-c41f9f5daf06-config\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265591 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qff9\" (UniqueName: \"kubernetes.io/projected/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-kube-api-access-7qff9\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265625 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rk2p\" (UniqueName: \"kubernetes.io/projected/b143e5c3-54ec-40d9-9c11-690cf321df9f-kube-api-access-4rk2p\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265680 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ac9cc53-448a-40b4-929e-5d13a9ebb883-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265796 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265839 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwdw7\" (UniqueName: \"kubernetes.io/projected/dab93fb0-80fd-4bb3-aa09-e95434e7354b-kube-api-access-vwdw7\") pod \"downloads-7954f5f757-v52hz\" (UID: \"dab93fb0-80fd-4bb3-aa09-e95434e7354b\") " pod="openshift-console/downloads-7954f5f757-v52hz"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265884 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-config\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265894 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-audit-policies\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265952 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-oauth-config\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.265970 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.266002 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btlmp\" (UniqueName: \"kubernetes.io/projected/cf402561-e803-41f4-9b0a-caf00944c023-kube-api-access-btlmp\") pod \"cluster-samples-operator-665b6dd947-9shcm\" (UID: \"cf402561-e803-41f4-9b0a-caf00944c023\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.266035 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztpjw\" (UniqueName: \"kubernetes.io/projected/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-kube-api-access-ztpjw\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.266061 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-node-bootstrap-token\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.266093 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.266085 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/ddf860a2-14da-473d-97e4-c5b2a28828c1-node-pullsecrets\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.266119 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35d86102-0e39-4f45-b7b6-c41f9f5daf06-serving-cert\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.266155 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13d43ef8-ebda-4e16-8616-ac9697607054-audit-dir\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.266963 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b98074c8-0098-4d89-899a-948d21ef3500-config\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267199 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-oauth-serving-cert\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267378 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267411 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-client-ca\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267435 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-serving-cert\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267495 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8584d24e-b420-4eca-8947-081c7d6c69c7-proxy-tls\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267518 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnzbn\" (UniqueName: \"kubernetes.io/projected/3ac9cc53-448a-40b4-929e-5d13a9ebb883-kube-api-access-vnzbn\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267543 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c5563de-5269-435a-8e95-168e43531454-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267559 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a9c30534-72b1-4990-bd05-1b9e0e7677c6-srv-cert\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267607 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c016108a-e89e-4bf3-97b2-fb9344746be8-profile-collector-cert\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267653 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnskj\" (UniqueName: \"kubernetes.io/projected/8f722787-3b10-475b-a9ff-4c84d8de3d34-kube-api-access-rnskj\") pod \"ingress-canary-m2skw\" (UID: \"8f722787-3b10-475b-a9ff-4c84d8de3d34\") " pod="openshift-ingress-canary/ingress-canary-m2skw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267720 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-ca-trust-extracted\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267749 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-6qnvs\" (UID: \"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267776 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwttv\" (UniqueName: \"kubernetes.io/projected/ddf860a2-14da-473d-97e4-c5b2a28828c1-kube-api-access-zwttv\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267801 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b0fc5c3c-5c5c-4f45-8672-a097373973d0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-48qmb\" (UID: \"b0fc5c3c-5c5c-4f45-8672-a097373973d0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267824 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-socket-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267848 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b98074c8-0098-4d89-899a-948d21ef3500-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267871 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f343b01f-4fea-4964-a4ac-3a6968e65967-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267896 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.267921 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-audit-dir\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.268079 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-service-ca\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.268153 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35d86102-0e39-4f45-b7b6-c41f9f5daf06-config\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.269098 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-tls\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.269215 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-serving-cert\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.269352 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-etcd-client\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.269434 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-audit-policies\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.269918 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.270116 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-client-ca\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.270346 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-serving-cert\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.270381 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-serving-cert\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.270904 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.270930 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.270967 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-encryption-config\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271042 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-serving-cert\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271098 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-audit-dir\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271125 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35d86102-0e39-4f45-b7b6-c41f9f5daf06-trusted-ca\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271172 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-client-ca\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271136 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271239 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-trusted-ca-bundle\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271251 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-config\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271271 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-console-config\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271326 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-encryption-config\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271353 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271464 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-ca-trust-extracted\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271541 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d20623eb-726b-4684-a8f4-3358d6a1c7fd-webhook-cert\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271604 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f836e7f7-5926-4744-8a73-af83fedb06cd-machine-approver-tls\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271660 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f836e7f7-5926-4744-8a73-af83fedb06cd-config\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271687 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-audit\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271719 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.271733 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-installation-pull-secrets\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272088 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-oauth-config\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272137 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272206 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8584d24e-b420-4eca-8947-081c7d6c69c7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272376 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35d86102-0e39-4f45-b7b6-c41f9f5daf06-trusted-ca\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272558 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272733 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName:
\"kubernetes.io/secret/036ea3a8-bb27-4f18-9438-ed26ae51833c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272772 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-config\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272808 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-service-ca\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272897 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.272927 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-audit\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273008 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxf2f\" (UniqueName: \"kubernetes.io/projected/c016108a-e89e-4bf3-97b2-fb9344746be8-kube-api-access-cxf2f\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273288 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-console-config\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273344 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8584d24e-b420-4eca-8947-081c7d6c69c7-images\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273373 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s6cq\" (UniqueName: \"kubernetes.io/projected/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-kube-api-access-5s6cq\") pod \"csi-hostpathplugin-fzrkw\" (UID: 
\"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273402 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f836e7f7-5926-4744-8a73-af83fedb06cd-auth-proxy-config\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273450 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-image-import-ca\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273482 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273515 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d20623eb-726b-4684-a8f4-3358d6a1c7fd-tmpfs\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.273662 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-client-ca\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.274054 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f836e7f7-5926-4744-8a73-af83fedb06cd-machine-approver-tls\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.274089 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f836e7f7-5926-4744-8a73-af83fedb06cd-config\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.274224 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/ddf860a2-14da-473d-97e4-c5b2a28828c1-encryption-config\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.274334 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f836e7f7-5926-4744-8a73-af83fedb06cd-auth-proxy-config\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.274863 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/ddf860a2-14da-473d-97e4-c5b2a28828c1-image-import-ca\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.274880 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.275055 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.275143 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.275494 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.276022 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-etcd-client\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.276543 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35d86102-0e39-4f45-b7b6-c41f9f5daf06-serving-cert\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.276981 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.277499 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.280887 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b98074c8-0098-4d89-899a-948d21ef3500-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.295171 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.313869 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.326760 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.326848 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.326990 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.326769 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.333895 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.353152 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.373257 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374081 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.374187 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:01.874166712 +0000 UTC m=+136.804255475 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374330 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374380 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44zsr\" (UniqueName: \"kubernetes.io/projected/642bec88-869e-42da-baa9-47e98f95ba17-kube-api-access-44zsr\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374425 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f343b01f-4fea-4964-a4ac-3a6968e65967-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374469 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0469087-9878-428e-b24e-e1f31f6f8848-config\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374507 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzm5m\" (UniqueName: \"kubernetes.io/projected/d20623eb-726b-4684-a8f4-3358d6a1c7fd-kube-api-access-xzm5m\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374541 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g74p9\" (UniqueName: \"kubernetes.io/projected/036ea3a8-bb27-4f18-9438-ed26ae51833c-kube-api-access-g74p9\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374602 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ncfc\" (UniqueName: \"kubernetes.io/projected/346fc9d4-63c9-4c45-833c-b45822a13b4b-kube-api-access-5ncfc\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374636 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phfc5\" (UniqueName: \"kubernetes.io/projected/8584d24e-b420-4eca-8947-081c7d6c69c7-kube-api-access-phfc5\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374686 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/cf402561-e803-41f4-9b0a-caf00944c023-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9shcm\" (UID: \"cf402561-e803-41f4-9b0a-caf00944c023\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374720 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-stats-auth\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374757 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn6fg\" (UniqueName: \"kubernetes.io/projected/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-kube-api-access-jn6fg\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374791 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7b841e1f-1dcf-48f3-97a1-88b401a0eee8-metrics-tls\") pod \"dns-operator-744455d44c-x5kzr\" (UID: \"7b841e1f-1dcf-48f3-97a1-88b401a0eee8\") " pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374825 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-config-volume\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374860 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-config\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374898 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjlgr\" (UniqueName: \"kubernetes.io/projected/7b841e1f-1dcf-48f3-97a1-88b401a0eee8-kube-api-access-zjlgr\") pod \"dns-operator-744455d44c-x5kzr\" (UID: \"7b841e1f-1dcf-48f3-97a1-88b401a0eee8\") " pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.374952 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f343b01f-4fea-4964-a4ac-3a6968e65967-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375001 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4af90184-bb3b-455d-a9dc-9e120c08b3c7-secret-volume\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375082 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0469087-9878-428e-b24e-e1f31f6f8848-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375126 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09b45167-2db2-48c1-8776-4edc3ecb9ffb-service-ca-bundle\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375163 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-mountpoint-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375195 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f722787-3b10-475b-a9ff-4c84d8de3d34-cert\") pod \"ingress-canary-m2skw\" (UID: \"8f722787-3b10-475b-a9ff-4c84d8de3d34\") " pod="openshift-ingress-canary/ingress-canary-m2skw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375228 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/655ce663-9a0e-4ce9-bdaf-e614234ac533-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-7mcpf\" (UID: \"655ce663-9a0e-4ce9-bdaf-e614234ac533\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375260 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-mountpoint-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375262 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac9cc53-448a-40b4-929e-5d13a9ebb883-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375315 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qfrz\" (UniqueName: \"kubernetes.io/projected/09b45167-2db2-48c1-8776-4edc3ecb9ffb-kube-api-access-7qfrz\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375343 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375379 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c5563de-5269-435a-8e95-168e43531454-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375403 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-metrics-certs\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375426 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c016108a-e89e-4bf3-97b2-fb9344746be8-srv-cert\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375449 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-plugins-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375474 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmlrr\" (UniqueName: \"kubernetes.io/projected/b0fc5c3c-5c5c-4f45-8672-a097373973d0-kube-api-access-jmlrr\") pod \"multus-admission-controller-857f4d67dd-48qmb\" (UID: \"b0fc5c3c-5c5c-4f45-8672-a097373973d0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375496 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bbc03c24-61f2-4009-9aa8-09a526122701-signing-cabundle\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375541 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-c8jdk\" (UniqueName: \"kubernetes.io/projected/69a44c9d-8342-4b54-a619-02abab39ecf6-kube-api-access-c8jdk\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375591 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69a44c9d-8342-4b54-a619-02abab39ecf6-serving-cert\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375616 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/346fc9d4-63c9-4c45-833c-b45822a13b4b-serving-cert\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375642 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-ca\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375664 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-default-certificate\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375686 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-csi-data-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375696 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f343b01f-4fea-4964-a4ac-3a6968e65967-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375720 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z46c\" (UniqueName: \"kubernetes.io/projected/2486e195-c853-4f92-a92b-8123812a01d5-kube-api-access-5z46c\") pod \"migrator-59844c95c7-6dflg\" (UID: \"2486e195-c853-4f92-a92b-8123812a01d5\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375756 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rk2p\" (UniqueName: \"kubernetes.io/projected/b143e5c3-54ec-40d9-9c11-690cf321df9f-kube-api-access-4rk2p\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: 
\"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375787 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ac9cc53-448a-40b4-929e-5d13a9ebb883-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375809 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-plugins-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.375812 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btlmp\" (UniqueName: \"kubernetes.io/projected/cf402561-e803-41f4-9b0a-caf00944c023-kube-api-access-btlmp\") pod \"cluster-samples-operator-665b6dd947-9shcm\" (UID: \"cf402561-e803-41f4-9b0a-caf00944c023\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376100 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-node-bootstrap-token\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376170 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztpjw\" (UniqueName: \"kubernetes.io/projected/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-kube-api-access-ztpjw\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376212 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnzbn\" (UniqueName: \"kubernetes.io/projected/3ac9cc53-448a-40b4-929e-5d13a9ebb883-kube-api-access-vnzbn\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376244 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c5563de-5269-435a-8e95-168e43531454-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376273 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a9c30534-72b1-4990-bd05-1b9e0e7677c6-srv-cert\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" 
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376301 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c016108a-e89e-4bf3-97b2-fb9344746be8-profile-collector-cert\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376335 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8584d24e-b420-4eca-8947-081c7d6c69c7-proxy-tls\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376368 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnskj\" (UniqueName: \"kubernetes.io/projected/8f722787-3b10-475b-a9ff-4c84d8de3d34-kube-api-access-rnskj\") pod \"ingress-canary-m2skw\" (UID: \"8f722787-3b10-475b-a9ff-4c84d8de3d34\") " pod="openshift-ingress-canary/ingress-canary-m2skw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376403 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-6qnvs\" (UID: \"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376449 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b0fc5c3c-5c5c-4f45-8672-a097373973d0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-48qmb\" (UID: \"b0fc5c3c-5c5c-4f45-8672-a097373973d0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376481 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-socket-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376514 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f343b01f-4fea-4964-a4ac-3a6968e65967-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.376518 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-csi-data-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378323 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-ca\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378390 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d20623eb-726b-4684-a8f4-3358d6a1c7fd-webhook-cert\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378509 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8584d24e-b420-4eca-8947-081c7d6c69c7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378599 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/036ea3a8-bb27-4f18-9438-ed26ae51833c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378623 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/346fc9d4-63c9-4c45-833c-b45822a13b4b-serving-cert\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378641 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-service-ca\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378677 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxf2f\" (UniqueName: \"kubernetes.io/projected/c016108a-e89e-4bf3-97b2-fb9344746be8-kube-api-access-cxf2f\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378707 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8584d24e-b420-4eca-8947-081c7d6c69c7-images\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378726 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-socket-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378731 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d20623eb-726b-4684-a8f4-3358d6a1c7fd-tmpfs\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378772 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s6cq\" (UniqueName: \"kubernetes.io/projected/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-kube-api-access-5s6cq\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378797 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7f8d\" (UniqueName: \"kubernetes.io/projected/655ce663-9a0e-4ce9-bdaf-e614234ac533-kube-api-access-c7f8d\") pod \"control-plane-machine-set-operator-78cbb6b69f-7mcpf\" (UID: \"655ce663-9a0e-4ce9-bdaf-e614234ac533\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378831 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c016108a-e89e-4bf3-97b2-fb9344746be8-profile-collector-cert\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378841 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c5563de-5269-435a-8e95-168e43531454-config\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378870 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fpml\" (UniqueName: \"kubernetes.io/projected/bbc03c24-61f2-4009-9aa8-09a526122701-kube-api-access-6fpml\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378895 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d20623eb-726b-4684-a8f4-3358d6a1c7fd-apiservice-cert\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378919 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378952 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/346fc9d4-63c9-4c45-833c-b45822a13b4b-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378973 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bbc03c24-61f2-4009-9aa8-09a526122701-signing-key\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378996 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-registration-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379021 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379044 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-metrics-tls\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379067 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-proxy-tls\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379097 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-serving-cert\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379124 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x2gg\" (UniqueName: \"kubernetes.io/projected/4af90184-bb3b-455d-a9dc-9e120c08b3c7-kube-api-access-8x2gg\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379146 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-certs\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379178 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379203 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-client\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379226 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a9c30534-72b1-4990-bd05-1b9e0e7677c6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379229 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d20623eb-726b-4684-a8f4-3358d6a1c7fd-tmpfs\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379248 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8584d24e-b420-4eca-8947-081c7d6c69c7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379258 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/036ea3a8-bb27-4f18-9438-ed26ae51833c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379283 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vshf\" (UniqueName: \"kubernetes.io/projected/a9c30534-72b1-4990-bd05-1b9e0e7677c6-kube-api-access-8vshf\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379311 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf85r\" (UniqueName: \"kubernetes.io/projected/c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788-kube-api-access-wf85r\") pod \"package-server-manager-789f6589d5-6qnvs\" (UID: \"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379379 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c0469087-9878-428e-b24e-e1f31f6f8848-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379393 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-service-ca\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379407 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cql88\" (UniqueName: \"kubernetes.io/projected/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-kube-api-access-cql88\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378602 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/655ce663-9a0e-4ce9-bdaf-e614234ac533-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-7mcpf\" (UID: \"655ce663-9a0e-4ce9-bdaf-e614234ac533\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.379433 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-config\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.378179 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69a44c9d-8342-4b54-a619-02abab39ecf6-config\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.380284 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69a44c9d-8342-4b54-a619-02abab39ecf6-serving-cert\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.380710 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4af90184-bb3b-455d-a9dc-9e120c08b3c7-secret-volume\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.380741 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7b841e1f-1dcf-48f3-97a1-88b401a0eee8-metrics-tls\") pod \"dns-operator-744455d44c-x5kzr\" (UID: \"7b841e1f-1dcf-48f3-97a1-88b401a0eee8\") " pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.380747 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.380988 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/036ea3a8-bb27-4f18-9438-ed26ae51833c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.381061 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a9c30534-72b1-4990-bd05-1b9e0e7677c6-srv-cert\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"
Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.381079 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-registration-dir\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw"
Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.381339 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:01.881320702 +0000 UTC m=+136.811409565 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.381597 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b0fc5c3c-5c5c-4f45-8672-a097373973d0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-48qmb\" (UID: \"b0fc5c3c-5c5c-4f45-8672-a097373973d0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.381665 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/346fc9d4-63c9-4c45-833c-b45822a13b4b-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.382761 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a9c30534-72b1-4990-bd05-1b9e0e7677c6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.383711 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f343b01f-4fea-4964-a4ac-3a6968e65967-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.383934 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-proxy-tls\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.384319 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69a44c9d-8342-4b54-a619-02abab39ecf6-etcd-client\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.386984 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/036ea3a8-bb27-4f18-9438-ed26ae51833c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.393218 4943 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.413923 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.433467 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.453366 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.461131 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c5563de-5269-435a-8e95-168e43531454-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.473335 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.480112 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.480366 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:01.980327245 +0000 UTC m=+136.910416018 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.480471 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.480903 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:01.980891482 +0000 UTC m=+136.910980245 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.481851 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c5563de-5269-435a-8e95-168e43531454-config\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.493514 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.512833 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.533185 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.538235 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/cf402561-e803-41f4-9b0a-caf00944c023-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9shcm\" (UID: \"cf402561-e803-41f4-9b0a-caf00944c023\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.553029 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.573683 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.582342 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.582662 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.082628035 +0000 UTC m=+137.012716828 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.583676 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.584164 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.08414888 +0000 UTC m=+137.014237673 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.593919 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.613712 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.618207 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0469087-9878-428e-b24e-e1f31f6f8848-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.633478 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.635903 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0469087-9878-428e-b24e-e1f31f6f8848-config\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.654142 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.660323 4943 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8584d24e-b420-4eca-8947-081c7d6c69c7-images\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.673557 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.705358 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.706061 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.206043794 +0000 UTC m=+137.136132547 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.707672 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.713548 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.713905 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8584d24e-b420-4eca-8947-081c7d6c69c7-proxy-tls\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.717140 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac9cc53-448a-40b4-929e-5d13a9ebb883-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.733686 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.757073 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.763070 4943 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ac9cc53-448a-40b4-929e-5d13a9ebb883-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.772420 4943 request.go:700] Waited for 1.00708325s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.774549 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.792702 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.807347 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.807720 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.307707275 +0000 UTC m=+137.237796028 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.813673 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.837157 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.845515 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-metrics-certs\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.854113 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.855301 4943 secret.go:188] Couldn't get secret openshift-image-registry/image-registry-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.855431 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls podName:99933cfb-96e8-4fc3-a9aa-291b306760f6 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.355397144 +0000 UTC m=+137.285485937 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls") pod "cluster-image-registry-operator-dc59b4c8b-b5sg2" (UID: "99933cfb-96e8-4fc3-a9aa-291b306760f6") : failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.855990 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09b45167-2db2-48c1-8776-4edc3ecb9ffb-service-ca-bundle\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.856691 4943 configmap.go:193] Couldn't get configMap openshift-image-registry/trusted-ca: failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.856749 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca podName:99933cfb-96e8-4fc3-a9aa-291b306760f6 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.356733633 +0000 UTC m=+137.286822386 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca") pod "cluster-image-registry-operator-dc59b4c8b-b5sg2" (UID: "99933cfb-96e8-4fc3-a9aa-291b306760f6") : failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.873789 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.893761 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.908673 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.908961 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.408947354 +0000 UTC m=+137.339036107 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.909112 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:01 crc kubenswrapper[4943]: E1129 06:36:01.909636 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.409610913 +0000 UTC m=+137.339699696 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.913460 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.918539 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-stats-auth\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.933777 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.942280 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/09b45167-2db2-48c1-8776-4edc3ecb9ffb-default-certificate\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.953340 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.974153 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 29 06:36:01 crc kubenswrapper[4943]: I1129 06:36:01.993803 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.010264 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.010483 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.510440739 +0000 UTC m=+137.440529522 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.011475 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.011962 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.511942334 +0000 UTC m=+137.442031117 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.013380 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.018975 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c016108a-e89e-4bf3-97b2-fb9344746be8-srv-cert\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.033918 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.044989 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-6qnvs\" (UID: \"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.053862 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.074031 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.094958 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 
29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.112015 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.112197 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.612175883 +0000 UTC m=+137.542264646 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.112243 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.112616 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.612605455 +0000 UTC m=+137.542694218 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.113497 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.121813 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.133515 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.144242 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d20623eb-726b-4684-a8f4-3358d6a1c7fd-webhook-cert\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.144947 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d20623eb-726b-4684-a8f4-3358d6a1c7fd-apiservice-cert\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.160251 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.171920 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.194263 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.213632 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.213778 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.713759172 +0000 UTC m=+137.643847935 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.213801 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.213843 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.214186 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.714175704 +0000 UTC m=+137.644264457 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.234229 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.241789 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-config\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.253854 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.273239 4943 configmap.go:193] Couldn't get configMap openshift-image-registry/trusted-ca: failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.273329 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca podName:3049b3ed-f405-4ecc-ade1-ad9753e53c1d nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.773309288 +0000 UTC m=+137.703398041 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.273845 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.283993 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-serving-cert\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.293680 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.304622 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bbc03c24-61f2-4009-9aa8-09a526122701-signing-key\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.313153 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.315644 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.315763 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.815735413 +0000 UTC m=+137.745824176 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.316383 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.316699 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-29 06:36:02.81668921 +0000 UTC m=+137.746777963 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.317428 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bbc03c24-61f2-4009-9aa8-09a526122701-signing-cabundle\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.333185 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.352905 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.373069 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.375333 4943 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.375375 4943 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.375413 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f722787-3b10-475b-a9ff-4c84d8de3d34-cert podName:8f722787-3b10-475b-a9ff-4c84d8de3d34 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.875387681 +0000 UTC m=+137.805476444 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8f722787-3b10-475b-a9ff-4c84d8de3d34-cert") pod "ingress-canary-m2skw" (UID: "8f722787-3b10-475b-a9ff-4c84d8de3d34") : failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.375436 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-config-volume podName:a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.875426142 +0000 UTC m=+137.805514915 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-config-volume") pod "dns-default-4txpk" (UID: "a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f") : failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.375460 4943 configmap.go:193] Couldn't get configMap openshift-operator-lifecycle-manager/collect-profiles-config: failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.375548 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume podName:4af90184-bb3b-455d-a9dc-9e120c08b3c7 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.875527105 +0000 UTC m=+137.805615868 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume") pod "collect-profiles-29406630-5fztj" (UID: "4af90184-bb3b-455d-a9dc-9e120c08b3c7") : failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.376612 4943 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.376672 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-node-bootstrap-token podName:642bec88-869e-42da-baa9-47e98f95ba17 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.876657068 +0000 UTC m=+137.806745841 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-node-bootstrap-token") pod "machine-config-server-2v9bf" (UID: "642bec88-869e-42da-baa9-47e98f95ba17") : failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.380339 4943 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.380436 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-certs podName:642bec88-869e-42da-baa9-47e98f95ba17 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.880417299 +0000 UTC m=+137.810506072 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-certs") pod "machine-config-server-2v9bf" (UID: "642bec88-869e-42da-baa9-47e98f95ba17") : failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.382540 4943 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.382620 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-metrics-tls podName:a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f nodeName:}" failed. 
No retries permitted until 2025-11-29 06:36:02.882606133 +0000 UTC m=+137.812694986 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-metrics-tls") pod "dns-default-4txpk" (UID: "a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f") : failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.394168 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.414089 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.417508 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.417894 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.418210 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:02.918191067 +0000 UTC m=+137.848279820 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.418444 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.434301 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.454097 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.474049 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.494417 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.514420 4943 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.519743 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.520207 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.020191188 +0000 UTC m=+137.950280041 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.534071 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.554770 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.574797 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.593698 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.613817 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.621010 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.621163 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.121139628 +0000 UTC m=+138.051228381 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.621300 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.622255 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.122101665 +0000 UTC m=+138.052190418 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.633615 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.654381 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.673862 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.707889 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8405beb0-21cf-44f5-9979-3488c214d762-bound-sa-token\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.722315 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.722523 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.222491929 +0000 UTC m=+138.152580692 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.722898 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.723248 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.223240492 +0000 UTC m=+138.153329245 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.727717 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/99933cfb-96e8-4fc3-a9aa-291b306760f6-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.746942 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8scvd\" (UniqueName: \"kubernetes.io/projected/a1dadabe-2740-4883-81c1-ea20746772f9-kube-api-access-8scvd\") pod \"authentication-operator-69f744f599-8wxhh\" (UID: \"a1dadabe-2740-4883-81c1-ea20746772f9\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.769173 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45rn6\" (UniqueName: \"kubernetes.io/projected/99933cfb-96e8-4fc3-a9aa-291b306760f6-kube-api-access-45rn6\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.772550 4943 request.go:700] Waited for 1.916110017s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/serviceaccounts/ingress-operator/token Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.790147 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgfc7\" (UniqueName: \"kubernetes.io/projected/8405beb0-21cf-44f5-9979-3488c214d762-kube-api-access-bgfc7\") pod \"ingress-operator-5b745b69d9-prf24\" (UID: \"8405beb0-21cf-44f5-9979-3488c214d762\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.809630 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hqgb\" (UniqueName: \"kubernetes.io/projected/afc2af4e-e064-41fd-8fcf-e184be168a9a-kube-api-access-9hqgb\") pod \"machine-api-operator-5694c8668f-z2hf5\" (UID: \"afc2af4e-e064-41fd-8fcf-e184be168a9a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.824210 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.824449 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.324427029 +0000 UTC m=+138.254515782 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.824627 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.824728 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.825048 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.325036286 +0000 UTC m=+138.255125039 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.827089 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp8n4\" (UniqueName: \"kubernetes.io/projected/f836e7f7-5926-4744-8a73-af83fedb06cd-kube-api-access-cp8n4\") pod \"machine-approver-56656f9798-7z22q\" (UID: \"f836e7f7-5926-4744-8a73-af83fedb06cd\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.847669 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42mdf\" (UniqueName: \"kubernetes.io/projected/35d86102-0e39-4f45-b7b6-c41f9f5daf06-kube-api-access-42mdf\") pod \"console-operator-58897d9998-z2b2d\" (UID: \"35d86102-0e39-4f45-b7b6-c41f9f5daf06\") " pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.871532 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8kng\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-kube-api-access-w8kng\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.877898 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.887340 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x49k\" (UniqueName: \"kubernetes.io/projected/b98074c8-0098-4d89-899a-948d21ef3500-kube-api-access-9x49k\") pod \"openshift-apiserver-operator-796bbdcf4f-h65pr\" (UID: \"b98074c8-0098-4d89-899a-948d21ef3500\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.910053 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-bound-sa-token\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.925847 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.927073 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.927330 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.927494 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-config-volume\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.927608 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f722787-3b10-475b-a9ff-4c84d8de3d34-cert\") pod \"ingress-canary-m2skw\" (UID: \"8f722787-3b10-475b-a9ff-4c84d8de3d34\") " pod="openshift-ingress-canary/ingress-canary-m2skw" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.927847 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.42780909 +0000 UTC m=+138.357897853 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.927959 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-node-bootstrap-token\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.928160 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.928195 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-metrics-tls\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.928238 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-certs\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.928342 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-config-volume\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.928377 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" Nov 29 06:36:02 crc kubenswrapper[4943]: E1129 06:36:02.928581 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.428549091 +0000 UTC m=+138.358637924 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.930749 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55bhh\" (UniqueName: \"kubernetes.io/projected/9eddac8b-ec06-4ec0-9b84-541a6361a9fa-kube-api-access-55bhh\") pod \"apiserver-7bbb656c7d-2sdx6\" (UID: \"9eddac8b-ec06-4ec0-9b84-541a6361a9fa\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.934090 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-metrics-tls\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.934417 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f722787-3b10-475b-a9ff-4c84d8de3d34-cert\") pod \"ingress-canary-m2skw\" (UID: \"8f722787-3b10-475b-a9ff-4c84d8de3d34\") " pod="openshift-ingress-canary/ingress-canary-m2skw" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.934683 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-node-bootstrap-token\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.936179 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/642bec88-869e-42da-baa9-47e98f95ba17-certs\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.937799 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.944539 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.948727 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6ncq\" (UniqueName: \"kubernetes.io/projected/1dec43e3-8363-43ca-a96b-6127086f75db-kube-api-access-v6ncq\") pod \"console-f9d7485db-jxf26\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.951186 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.972032 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-452vx\" (UniqueName: \"kubernetes.io/projected/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-kube-api-access-452vx\") pod \"controller-manager-879f6c89f-t7n65\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:02 crc kubenswrapper[4943]: I1129 06:36:02.993232 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ghf9\" (UniqueName: \"kubernetes.io/projected/13d43ef8-ebda-4e16-8616-ac9697607054-kube-api-access-8ghf9\") pod \"oauth-openshift-558db77b4-sdcdn\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.012344 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qff9\" (UniqueName: \"kubernetes.io/projected/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-kube-api-access-7qff9\") pod \"route-controller-manager-6576b87f9c-hwkpw\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.029533 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.029784 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.529752189 +0000 UTC m=+138.459840952 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.030033 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.030670 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.530659916 +0000 UTC m=+138.460748669 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.031833 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwdw7\" (UniqueName: \"kubernetes.io/projected/dab93fb0-80fd-4bb3-aa09-e95434e7354b-kube-api-access-vwdw7\") pod \"downloads-7954f5f757-v52hz\" (UID: \"dab93fb0-80fd-4bb3-aa09-e95434e7354b\") " pod="openshift-console/downloads-7954f5f757-v52hz" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.052894 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwttv\" (UniqueName: \"kubernetes.io/projected/ddf860a2-14da-473d-97e4-c5b2a28828c1-kube-api-access-zwttv\") pod \"apiserver-76f77b778f-hkg6f\" (UID: \"ddf860a2-14da-473d-97e4-c5b2a28828c1\") " pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.056160 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.075490 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.075757 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.084702 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.094307 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.098715 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.118863 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.120974 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-8wxhh"] Nov 29 06:36:03 crc kubenswrapper[4943]: W1129 06:36:03.127882 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf836e7f7_5926_4744_8a73_af83fedb06cd.slice/crio-6bc6a9db28b0f05572afd3816297931d5f0046bed057935c26f4dc84ee8bf8cf WatchSource:0}: Error finding container 6bc6a9db28b0f05572afd3816297931d5f0046bed057935c26f4dc84ee8bf8cf: Status 404 returned error can't find the container with id 6bc6a9db28b0f05572afd3816297931d5f0046bed057935c26f4dc84ee8bf8cf Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.128631 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:03 crc kubenswrapper[4943]: W1129 06:36:03.129879 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1dadabe_2740_4883_81c1_ea20746772f9.slice/crio-e70bdea6319191f27a5b241084c4a7b1abb24d3707755d35b99dad88b38bd432 WatchSource:0}: Error finding container e70bdea6319191f27a5b241084c4a7b1abb24d3707755d35b99dad88b38bd432: Status 404 returned error can't find the container with id e70bdea6319191f27a5b241084c4a7b1abb24d3707755d35b99dad88b38bd432 Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.131218 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.131357 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.631335418 +0000 UTC m=+138.561424171 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.131727 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.133260 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.633245544 +0000 UTC m=+138.563334297 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.134543 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.154877 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.208289 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.208400 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.209797 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44zsr\" (UniqueName: \"kubernetes.io/projected/642bec88-869e-42da-baa9-47e98f95ba17-kube-api-access-44zsr\") pod \"machine-config-server-2v9bf\" (UID: \"642bec88-869e-42da-baa9-47e98f95ba17\") " pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.213153 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-v52hz" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.214009 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f343b01f-4fea-4964-a4ac-3a6968e65967-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5ckgg\" (UID: \"f343b01f-4fea-4964-a4ac-3a6968e65967\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.234130 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.234608 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.734579546 +0000 UTC m=+138.664668329 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.238292 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g74p9\" (UniqueName: \"kubernetes.io/projected/036ea3a8-bb27-4f18-9438-ed26ae51833c-kube-api-access-g74p9\") pod \"openshift-controller-manager-operator-756b6f6bc6-4nd4v\" (UID: \"036ea3a8-bb27-4f18-9438-ed26ae51833c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.262961 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzm5m\" (UniqueName: \"kubernetes.io/projected/d20623eb-726b-4684-a8f4-3358d6a1c7fd-kube-api-access-xzm5m\") pod \"packageserver-d55dfcdfc-wzqp2\" (UID: \"d20623eb-726b-4684-a8f4-3358d6a1c7fd\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.275504 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phfc5\" (UniqueName: \"kubernetes.io/projected/8584d24e-b420-4eca-8947-081c7d6c69c7-kube-api-access-phfc5\") pod \"machine-config-operator-74547568cd-282cm\" (UID: \"8584d24e-b420-4eca-8947-081c7d6c69c7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.276914 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.282870 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.295929 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z2b2d"] Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.297217 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ncfc\" (UniqueName: \"kubernetes.io/projected/346fc9d4-63c9-4c45-833c-b45822a13b4b-kube-api-access-5ncfc\") pod \"openshift-config-operator-7777fb866f-jwr2w\" (UID: \"346fc9d4-63c9-4c45-833c-b45822a13b4b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.315304 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn6fg\" (UniqueName: \"kubernetes.io/projected/2bf596eb-1810-4ab2-9972-b0452b4c8e9a-kube-api-access-jn6fg\") pod \"service-ca-operator-777779d784-4bnkx\" (UID: \"2bf596eb-1810-4ab2-9972-b0452b4c8e9a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.315720 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-z2hf5"] Nov 29 06:36:03 crc kubenswrapper[4943]: W1129 06:36:03.319833 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35d86102_0e39_4f45_b7b6_c41f9f5daf06.slice/crio-7cd470bb115b6cefea903460b1676333fc1473b2ddb5756019f63523709d9527 WatchSource:0}: Error finding container 7cd470bb115b6cefea903460b1676333fc1473b2ddb5756019f63523709d9527: Status 404 returned error can't find the container with id 7cd470bb115b6cefea903460b1676333fc1473b2ddb5756019f63523709d9527 Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.333547 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjlgr\" (UniqueName: \"kubernetes.io/projected/7b841e1f-1dcf-48f3-97a1-88b401a0eee8-kube-api-access-zjlgr\") pod \"dns-operator-744455d44c-x5kzr\" (UID: \"7b841e1f-1dcf-48f3-97a1-88b401a0eee8\") " pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" Nov 29 06:36:03 crc kubenswrapper[4943]: W1129 06:36:03.334760 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podafc2af4e_e064_41fd_8fcf_e184be168a9a.slice/crio-8027cb3499fc3ffbad971f7cb10f207184bc449fb98ed057ed3d20867099e8b0 WatchSource:0}: Error finding container 8027cb3499fc3ffbad971f7cb10f207184bc449fb98ed057ed3d20867099e8b0: Status 404 returned error can't find the container with id 8027cb3499fc3ffbad971f7cb10f207184bc449fb98ed057ed3d20867099e8b0 Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.336500 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.336890 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-29 06:36:03.836869675 +0000 UTC m=+138.766958498 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.339668 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.348601 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.371277 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btlmp\" (UniqueName: \"kubernetes.io/projected/cf402561-e803-41f4-9b0a-caf00944c023-kube-api-access-btlmp\") pod \"cluster-samples-operator-665b6dd947-9shcm\" (UID: \"cf402561-e803-41f4-9b0a-caf00944c023\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.378130 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"] Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.379549 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-prf24"] Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.382514 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr"] Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.393175 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztpjw\" (UniqueName: \"kubernetes.io/projected/a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f-kube-api-access-ztpjw\") pod \"dns-default-4txpk\" (UID: \"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f\") " pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.395942 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.402070 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.409107 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnzbn\" (UniqueName: \"kubernetes.io/projected/3ac9cc53-448a-40b4-929e-5d13a9ebb883-kube-api-access-vnzbn\") pod \"kube-storage-version-migrator-operator-b67b599dd-dgt74\" (UID: \"3ac9cc53-448a-40b4-929e-5d13a9ebb883\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.419233 4943 secret.go:188] Couldn't get secret openshift-image-registry/image-registry-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.419265 4943 configmap.go:193] Couldn't get configMap openshift-image-registry/trusted-ca: failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.419341 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls podName:99933cfb-96e8-4fc3-a9aa-291b306760f6 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.419316563 +0000 UTC m=+139.349405316 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls") pod "cluster-image-registry-operator-dc59b4c8b-b5sg2" (UID: "99933cfb-96e8-4fc3-a9aa-291b306760f6") : failed to sync secret cache: timed out waiting for the condition Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.419370 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca podName:99933cfb-96e8-4fc3-a9aa-291b306760f6 nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.419359114 +0000 UTC m=+139.349447927 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca") pod "cluster-image-registry-operator-dc59b4c8b-b5sg2" (UID: "99933cfb-96e8-4fc3-a9aa-291b306760f6") : failed to sync configmap cache: timed out waiting for the condition Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.425175 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-2v9bf" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.431654 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmlrr\" (UniqueName: \"kubernetes.io/projected/b0fc5c3c-5c5c-4f45-8672-a097373973d0-kube-api-access-jmlrr\") pod \"multus-admission-controller-857f4d67dd-48qmb\" (UID: \"b0fc5c3c-5c5c-4f45-8672-a097373973d0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.437285 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.437450 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.937428464 +0000 UTC m=+138.867517217 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.438148 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.438636 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:03.938623518 +0000 UTC m=+138.868712271 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.451237 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.451678 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c5563de-5269-435a-8e95-168e43531454-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rc66c\" (UID: \"2c5563de-5269-435a-8e95-168e43531454\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.469828 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8jdk\" (UniqueName: \"kubernetes.io/projected/69a44c9d-8342-4b54-a619-02abab39ecf6-kube-api-access-c8jdk\") pod \"etcd-operator-b45778765-cqbvm\" (UID: \"69a44c9d-8342-4b54-a619-02abab39ecf6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.509667 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z46c\" (UniqueName: \"kubernetes.io/projected/2486e195-c853-4f92-a92b-8123812a01d5-kube-api-access-5z46c\") pod \"migrator-59844c95c7-6dflg\" (UID: \"2486e195-c853-4f92-a92b-8123812a01d5\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.534349 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxf2f\" (UniqueName: \"kubernetes.io/projected/c016108a-e89e-4bf3-97b2-fb9344746be8-kube-api-access-cxf2f\") pod \"catalog-operator-68c6474976-wjfps\" (UID: \"c016108a-e89e-4bf3-97b2-fb9344746be8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.539911 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.539972 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.03995554 +0000 UTC m=+138.970044293 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.540244 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.540513 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.040505946 +0000 UTC m=+138.970594699 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.548568 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnskj\" (UniqueName: \"kubernetes.io/projected/8f722787-3b10-475b-a9ff-4c84d8de3d34-kube-api-access-rnskj\") pod \"ingress-canary-m2skw\" (UID: \"8f722787-3b10-475b-a9ff-4c84d8de3d34\") " pod="openshift-ingress-canary/ingress-canary-m2skw" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.557881 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.570534 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x2gg\" (UniqueName: \"kubernetes.io/projected/4af90184-bb3b-455d-a9dc-9e120c08b3c7-kube-api-access-8x2gg\") pod \"collect-profiles-29406630-5fztj\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.589512 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.592883 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vshf\" (UniqueName: \"kubernetes.io/projected/a9c30534-72b1-4990-bd05-1b9e0e7677c6-kube-api-access-8vshf\") pod \"olm-operator-6b444d44fb-8rjvk\" (UID: \"a9c30534-72b1-4990-bd05-1b9e0e7677c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.595409 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.601983 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.608094 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fpml\" (UniqueName: \"kubernetes.io/projected/bbc03c24-61f2-4009-9aa8-09a526122701-kube-api-access-6fpml\") pod \"service-ca-9c57cc56f-vdxv4\" (UID: \"bbc03c24-61f2-4009-9aa8-09a526122701\") " pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.622472 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.626761 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7f8d\" (UniqueName: \"kubernetes.io/projected/655ce663-9a0e-4ce9-bdaf-e614234ac533-kube-api-access-c7f8d\") pod \"control-plane-machine-set-operator-78cbb6b69f-7mcpf\" (UID: \"655ce663-9a0e-4ce9-bdaf-e614234ac533\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.628520 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.635818 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.641044 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.641281 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.14125228 +0000 UTC m=+139.071341033 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.641458 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.641881 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.141863338 +0000 UTC m=+139.071952091 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.649320 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c0469087-9878-428e-b24e-e1f31f6f8848-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9n9dn\" (UID: \"c0469087-9878-428e-b24e-e1f31f6f8848\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.654630 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.667156 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cql88\" (UniqueName: \"kubernetes.io/projected/adebb31c-a7f3-48e5-b82b-fc44b9f5c05b-kube-api-access-cql88\") pod \"machine-config-controller-84d6567774-sdpcj\" (UID: \"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.668888 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.675394 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.687281 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s6cq\" (UniqueName: \"kubernetes.io/projected/b4996ee7-155c-4aaa-bffa-d0e2dcaa863e-kube-api-access-5s6cq\") pod \"csi-hostpathplugin-fzrkw\" (UID: \"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e\") " pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.709653 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.710499 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf85r\" (UniqueName: \"kubernetes.io/projected/c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788-kube-api-access-wf85r\") pod \"package-server-manager-789f6589d5-6qnvs\" (UID: \"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.714240 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.716797 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.733553 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.743076 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.743191 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.743432 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.243392735 +0000 UTC m=+139.173481498 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.743629 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.743936 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.243921411 +0000 UTC m=+139.174010214 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.760195 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.761136 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-m2skw" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.766515 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.844857 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.844988 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.344968174 +0000 UTC m=+139.275056927 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.845162 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.845443 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.345435217 +0000 UTC m=+139.275523970 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.909441 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.916353 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.918853 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qfrz\" (UniqueName: \"kubernetes.io/projected/09b45167-2db2-48c1-8776-4edc3ecb9ffb-kube-api-access-7qfrz\") pod \"router-default-5444994796-h4mrx\" (UID: \"09b45167-2db2-48c1-8776-4edc3ecb9ffb\") " pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.926859 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rk2p\" (UniqueName: \"kubernetes.io/projected/b143e5c3-54ec-40d9-9c11-690cf321df9f-kube-api-access-4rk2p\") pod \"marketplace-operator-79b997595-gxzdk\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.941948 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.962098 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.963446 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:03 crc kubenswrapper[4943]: E1129 06:36:03.963885 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.463868211 +0000 UTC m=+139.393956964 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.981344 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-sdcdn"] Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.982005 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.994597 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:03 crc kubenswrapper[4943]: I1129 06:36:03.996066 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.023798 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-z2b2d" event={"ID":"35d86102-0e39-4f45-b7b6-c41f9f5daf06","Type":"ContainerStarted","Data":"7cd470bb115b6cefea903460b1676333fc1473b2ddb5756019f63523709d9527"} Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.026171 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" event={"ID":"a1dadabe-2740-4883-81c1-ea20746772f9","Type":"ContainerStarted","Data":"e70bdea6319191f27a5b241084c4a7b1abb24d3707755d35b99dad88b38bd432"} Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.027835 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" event={"ID":"f836e7f7-5926-4744-8a73-af83fedb06cd","Type":"ContainerStarted","Data":"6bc6a9db28b0f05572afd3816297931d5f0046bed057935c26f4dc84ee8bf8cf"} Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.030702 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" event={"ID":"1c7844d3-bb90-4f62-8aa2-fe2b64f92343","Type":"ContainerStarted","Data":"bfda611bd70f032dde8678d26dc4208d2493dda29b6857445203d133ccb3e852"} Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.033091 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" event={"ID":"afc2af4e-e064-41fd-8fcf-e184be168a9a","Type":"ContainerStarted","Data":"8027cb3499fc3ffbad971f7cb10f207184bc449fb98ed057ed3d20867099e8b0"} Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.086921 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.087614 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.587599288 +0000 UTC m=+139.517688041 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.192252 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.193076 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.692998589 +0000 UTC m=+139.623087342 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.294431 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.294939 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.794922158 +0000 UTC m=+139.725010911 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.395409 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.396451 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.896430005 +0000 UTC m=+139.826518758 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.432150 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-t7n65"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.442180 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-jxf26"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.451366 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-v52hz"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.497024 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.497376 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.497396 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.498401 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/99933cfb-96e8-4fc3-a9aa-291b306760f6-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.498694 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:04.998683183 +0000 UTC m=+139.928771936 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.504262 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/99933cfb-96e8-4fc3-a9aa-291b306760f6-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b5sg2\" (UID: \"99933cfb-96e8-4fc3-a9aa-291b306760f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.537529 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74"] Nov 29 06:36:04 crc kubenswrapper[4943]: W1129 06:36:04.562415 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddab93fb0_80fd_4bb3_aa09_e95434e7354b.slice/crio-716a40098a7e3004fd32715a2f8667a4153f56046abe82e62b94bea09da5db4c WatchSource:0}: Error finding container 716a40098a7e3004fd32715a2f8667a4153f56046abe82e62b94bea09da5db4c: Status 404 returned error can't find the container with id 716a40098a7e3004fd32715a2f8667a4153f56046abe82e62b94bea09da5db4c Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.571308 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.599250 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.599635 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-29 06:36:05.099616623 +0000 UTC m=+140.029705376 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.600639 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.628008 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-x5kzr"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.662169 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.663414 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hkg6f"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.673454 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.702471 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vdxv4"] Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.702647 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.702998 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.202983013 +0000 UTC m=+140.133071766 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.803314 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.803459 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.303429818 +0000 UTC m=+140.233518571 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.803612 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.803941 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.303927544 +0000 UTC m=+140.234016297 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:04 crc kubenswrapper[4943]: I1129 06:36:04.911480 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:04 crc kubenswrapper[4943]: E1129 06:36:04.911833 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.411817347 +0000 UTC m=+140.341906100 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.013100 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.013408 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.513397165 +0000 UTC m=+140.443485918 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: W1129 06:36:05.043828 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09b45167_2db2_48c1_8776_4edc3ecb9ffb.slice/crio-9cc1fb8355786da73a8f862646baef314b8e47e376887f04a3247fe11d41988e WatchSource:0}: Error finding container 9cc1fb8355786da73a8f862646baef314b8e47e376887f04a3247fe11d41988e: Status 404 returned error can't find the container with id 9cc1fb8355786da73a8f862646baef314b8e47e376887f04a3247fe11d41988e Nov 29 06:36:05 crc kubenswrapper[4943]: W1129 06:36:05.047377 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ac9cc53_448a_40b4_929e_5d13a9ebb883.slice/crio-47f35d1ce219335c074256c8b016131501a4453a4a577a4defd417f5c301f226 WatchSource:0}: Error finding container 47f35d1ce219335c074256c8b016131501a4453a4a577a4defd417f5c301f226: Status 404 returned error can't find the container with id 47f35d1ce219335c074256c8b016131501a4453a4a577a4defd417f5c301f226 Nov 29 06:36:05 crc kubenswrapper[4943]: W1129 06:36:05.049420 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod036ea3a8_bb27_4f18_9438_ed26ae51833c.slice/crio-63795ad113cf6d983d7c8354794aae86711dbf9a45df3961482fdfaa6dde9c9f WatchSource:0}: Error finding container 63795ad113cf6d983d7c8354794aae86711dbf9a45df3961482fdfaa6dde9c9f: Status 404 returned error can't find the container with id 63795ad113cf6d983d7c8354794aae86711dbf9a45df3961482fdfaa6dde9c9f Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.058166 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-z2b2d" event={"ID":"35d86102-0e39-4f45-b7b6-c41f9f5daf06","Type":"ContainerStarted","Data":"c96693038fe58d6389a14cb35ae52df435fef075ebbc4b90131e96db69785a49"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.058855 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.061742 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-v52hz" event={"ID":"dab93fb0-80fd-4bb3-aa09-e95434e7354b","Type":"ContainerStarted","Data":"716a40098a7e3004fd32715a2f8667a4153f56046abe82e62b94bea09da5db4c"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.064793 4943 patch_prober.go:28] interesting pod/console-operator-58897d9998-z2b2d container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.064840 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-z2b2d" podUID="35d86102-0e39-4f45-b7b6-c41f9f5daf06" containerName="console-operator" 
probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.087522 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" event={"ID":"b98074c8-0098-4d89-899a-948d21ef3500","Type":"ContainerStarted","Data":"7e58e724eb452002cd2aaeb164c4bdd8720518eaa223887ece5e0f2ca2175371"} Nov 29 06:36:05 crc kubenswrapper[4943]: W1129 06:36:05.097738 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b841e1f_1dcf_48f3_97a1_88b401a0eee8.slice/crio-6ca7510aa01907b34a27deaadb3b3bb8ff498a8338f2cc9d4ef36f18d3b6aeb8 WatchSource:0}: Error finding container 6ca7510aa01907b34a27deaadb3b3bb8ff498a8338f2cc9d4ef36f18d3b6aeb8: Status 404 returned error can't find the container with id 6ca7510aa01907b34a27deaadb3b3bb8ff498a8338f2cc9d4ef36f18d3b6aeb8 Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.115027 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.116272 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.616255132 +0000 UTC m=+140.546343885 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.156423 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" event={"ID":"55652a9d-fd7e-4c49-993e-bf1eff1d57f9","Type":"ContainerStarted","Data":"a9d4d7657be546c457f1ac79c601263cad2d54254d7d60d7d6628182dc2f3e76"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.163413 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" event={"ID":"afc2af4e-e064-41fd-8fcf-e184be168a9a","Type":"ContainerStarted","Data":"1841c54250ee85605927f2b414e54c93af858b21e0ff6cd303d3e5086456aead"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.167225 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" event={"ID":"9eddac8b-ec06-4ec0-9b84-541a6361a9fa","Type":"ContainerStarted","Data":"d1cb29c6946ef9bf2589f5454969def9cd1df991d00e121d6b3f117b3c81f8a6"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.184763 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" event={"ID":"13d43ef8-ebda-4e16-8616-ac9697607054","Type":"ContainerStarted","Data":"c7b513b982fcdd267bf8fb9d64467529752d301e1371dd99c558d3d5a78b38f7"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.187144 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-z2b2d" podStartSLOduration=120.187123129 podStartE2EDuration="2m0.187123129s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:05.18608641 +0000 UTC m=+140.116175163" watchObservedRunningTime="2025-11-29 06:36:05.187123129 +0000 UTC m=+140.117211882" Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.190701 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" event={"ID":"8405beb0-21cf-44f5-9979-3488c214d762","Type":"ContainerStarted","Data":"f3530cbe0409141b51d282b9d3cd76e8088947bc4d6f366ecf0aa2716c5fe04c"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.190762 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" event={"ID":"8405beb0-21cf-44f5-9979-3488c214d762","Type":"ContainerStarted","Data":"2686a917bf5748ff143972b7561ee80bf6e7e438da82c56a67d28d845da7c9e9"} Nov 29 06:36:05 crc kubenswrapper[4943]: W1129 06:36:05.191332 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod642bec88_869e_42da_baa9_47e98f95ba17.slice/crio-96d3685000b358e8c226d3122c9f9494328202be7cce9522271a2c29c64365bc WatchSource:0}: Error finding container 96d3685000b358e8c226d3122c9f9494328202be7cce9522271a2c29c64365bc: Status 404 returned error can't find the container with id 
96d3685000b358e8c226d3122c9f9494328202be7cce9522271a2c29c64365bc Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.210116 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jxf26" event={"ID":"1dec43e3-8363-43ca-a96b-6127086f75db","Type":"ContainerStarted","Data":"ec1bf6a62a079387422d09c78951020accd37aaef8b20d7d45e74cd9e1ce4e82"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.216563 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.217043 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.717026607 +0000 UTC m=+140.647115360 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.244448 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" event={"ID":"a1dadabe-2740-4883-81c1-ea20746772f9","Type":"ContainerStarted","Data":"c2095b519e8af3077df896da033ceafcc0f9df700d74f40b4181bc31299c8644"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.255071 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" event={"ID":"f836e7f7-5926-4744-8a73-af83fedb06cd","Type":"ContainerStarted","Data":"6d7c858a5e44ce22bb9f0f7e233b38b737457f8ec7bfe3f9f6210bf68bca1324"} Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.266392 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-282cm"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.278359 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-cqbvm"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.299526 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-fzrkw"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.317506 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.318343 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.818326607 +0000 UTC m=+140.748415360 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.432206 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.433144 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:05.933128923 +0000 UTC m=+140.863217686 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.473314 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-8wxhh" podStartSLOduration=120.473293211 podStartE2EDuration="2m0.473293211s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:05.468443949 +0000 UTC m=+140.398532702" watchObservedRunningTime="2025-11-29 06:36:05.473293211 +0000 UTC m=+140.403381964" Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.533145 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.534014 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.033998651 +0000 UTC m=+140.964087404 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.589610 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-48qmb"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.602553 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.636337 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.636797 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.136784175 +0000 UTC m=+141.066872938 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.637709 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-4txpk"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.651728 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.658644 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.659318 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.661636 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.665426 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.713686 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.728486 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.738429 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.738542 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.238520588 +0000 UTC m=+141.168609341 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.738847 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.739147 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.239140427 +0000 UTC m=+141.169229180 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.739221 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.744512 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.747356 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-m2skw"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.749459 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.753012 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.787222 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.840831 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.841292 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.341277331 +0000 UTC m=+141.271366084 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.918931 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxzdk"] Nov 29 06:36:05 crc kubenswrapper[4943]: I1129 06:36:05.943068 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:05 crc kubenswrapper[4943]: E1129 06:36:05.943434 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.443421056 +0000 UTC m=+141.373509809 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.044946 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.045387 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.545361006 +0000 UTC m=+141.475449769 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: W1129 06:36:06.056543 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc016108a_e89e_4bf3_97b2_fb9344746be8.slice/crio-6f266e57830fc897920907afc1e543175ded5d8d49a7c1aee6ab40e8691eb9b7 WatchSource:0}: Error finding container 6f266e57830fc897920907afc1e543175ded5d8d49a7c1aee6ab40e8691eb9b7: Status 404 returned error can't find the container with id 6f266e57830fc897920907afc1e543175ded5d8d49a7c1aee6ab40e8691eb9b7 Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.149401 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.150161 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.650145578 +0000 UTC m=+141.580234331 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.251719 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.252663 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.752645384 +0000 UTC m=+141.682734137 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.282505 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" event={"ID":"c016108a-e89e-4bf3-97b2-fb9344746be8","Type":"ContainerStarted","Data":"6f266e57830fc897920907afc1e543175ded5d8d49a7c1aee6ab40e8691eb9b7"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.295846 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" event={"ID":"2c5563de-5269-435a-8e95-168e43531454","Type":"ContainerStarted","Data":"04f2e3d6a9203f0e5d42d142804623e8cc3a3e3e83a66266c5d3acd1dd9b8dd7"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.297189 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf" event={"ID":"655ce663-9a0e-4ce9-bdaf-e614234ac533","Type":"ContainerStarted","Data":"6dce89dfd2c751b61fbe7fbf60f60ebbf7bcdf7254ba43ee5f76adbf401d987e"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.299414 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" event={"ID":"f836e7f7-5926-4744-8a73-af83fedb06cd","Type":"ContainerStarted","Data":"2c95dad240bbbc690c4a4555465110d671f597b384b96a6b0db2e1c75a93fdff"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.352422 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" event={"ID":"13d43ef8-ebda-4e16-8616-ac9697607054","Type":"ContainerStarted","Data":"071f482d3c2b0219db23347af7dade48df3d507cae836d1b4065c8c08f21f742"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.357525 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.358067 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.858036125 +0000 UTC m=+141.788124878 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.358274 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.359821 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" event={"ID":"d20623eb-726b-4684-a8f4-3358d6a1c7fd","Type":"ContainerStarted","Data":"202520c651c5d0702d9385426e9b71d329bf0467e3c10492430289b11298e49a"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.369239 4943 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-sdcdn container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.22:6443/healthz\": dial tcp 10.217.0.22:6443: connect: connection refused" start-of-body= Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.369306 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" podUID="13d43ef8-ebda-4e16-8616-ac9697607054" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.22:6443/healthz\": dial tcp 10.217.0.22:6443: connect: connection refused" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.380087 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-2v9bf" event={"ID":"642bec88-869e-42da-baa9-47e98f95ba17","Type":"ContainerStarted","Data":"96d3685000b358e8c226d3122c9f9494328202be7cce9522271a2c29c64365bc"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.394877 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" podStartSLOduration=121.394861194 podStartE2EDuration="2m1.394861194s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.393056412 +0000 UTC m=+141.323145165" watchObservedRunningTime="2025-11-29 06:36:06.394861194 +0000 UTC m=+141.324949947" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.395187 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7z22q" podStartSLOduration=121.395181514 podStartE2EDuration="2m1.395181514s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.333875606 +0000 UTC m=+141.263964369" watchObservedRunningTime="2025-11-29 06:36:06.395181514 +0000 UTC m=+141.325270267" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.407877 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" 
event={"ID":"99933cfb-96e8-4fc3-a9aa-291b306760f6","Type":"ContainerStarted","Data":"a139961f6c8a501f69dbc43bfb1c8d6caa851271157842d0f9b32bd4aeddd0f9"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.411064 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" event={"ID":"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b","Type":"ContainerStarted","Data":"e4a150db906ae9bf6c9e9dab1d8fe23237c395f4556aeb6d1dc21c3453de4928"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.453516 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" event={"ID":"c0469087-9878-428e-b24e-e1f31f6f8848","Type":"ContainerStarted","Data":"1d79abfe25c288f1271f8effcaeb92989ba9be94d6ce1faab719fc4fc5e73843"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.459079 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.459190 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.95916653 +0000 UTC m=+141.889255283 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.459326 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" event={"ID":"b143e5c3-54ec-40d9-9c11-690cf321df9f","Type":"ContainerStarted","Data":"434f0c961426d5b612f7f9f09e46a49502f7526b210628897f8bd2df3c085e1e"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.459604 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.460655 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:06.960639483 +0000 UTC m=+141.890728306 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.476894 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" event={"ID":"4af90184-bb3b-455d-a9dc-9e120c08b3c7","Type":"ContainerStarted","Data":"230b7f76898a954460c07deaa1376fb4f586eef6d23807967a1aed8863d712cf"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.483616 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" event={"ID":"8584d24e-b420-4eca-8947-081c7d6c69c7","Type":"ContainerStarted","Data":"e505f9888a1e7132064121ca05611c8fd76debaa9986481b002d4ef20e3d634e"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.491144 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" event={"ID":"346fc9d4-63c9-4c45-833c-b45822a13b4b","Type":"ContainerStarted","Data":"9a66221fd7af8d04ee3c17913ab11b6741ac795de26b4ae11cdaeeb397d03079"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.496479 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-h4mrx" event={"ID":"09b45167-2db2-48c1-8776-4edc3ecb9ffb","Type":"ContainerStarted","Data":"9cc1fb8355786da73a8f862646baef314b8e47e376887f04a3247fe11d41988e"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.498229 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" event={"ID":"bbc03c24-61f2-4009-9aa8-09a526122701","Type":"ContainerStarted","Data":"5cec224fdef49c3c2b359c0dcb010c3c6d8f8068f7f4bbad7dcca114813c97c5"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.499954 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" event={"ID":"f343b01f-4fea-4964-a4ac-3a6968e65967","Type":"ContainerStarted","Data":"c0a223b13b197e2f0272e79ba58b6fdcf1b32d213e120254c4f7169192a8e3ff"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.499992 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" event={"ID":"f343b01f-4fea-4964-a4ac-3a6968e65967","Type":"ContainerStarted","Data":"c46e8f7d06337b142c7935b9965496047d6159e7d3b794aa9b474a5c42f9dd05"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.512063 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" event={"ID":"69a44c9d-8342-4b54-a619-02abab39ecf6","Type":"ContainerStarted","Data":"70f383caa75371ce09f226c640bc2683a2b6956d58d88769e8e28c87abcf64b4"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.520229 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" event={"ID":"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e","Type":"ContainerStarted","Data":"e65e8a840aea17efcd46ab69c6b85d478cf39710ab95ac460c9a0864f8d3d963"} Nov 29 06:36:06 crc 
kubenswrapper[4943]: I1129 06:36:06.530030 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5ckgg" podStartSLOduration=120.530002277 podStartE2EDuration="2m0.530002277s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.519678964 +0000 UTC m=+141.449767737" watchObservedRunningTime="2025-11-29 06:36:06.530002277 +0000 UTC m=+141.460091030" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.535837 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" event={"ID":"afc2af4e-e064-41fd-8fcf-e184be168a9a","Type":"ContainerStarted","Data":"1387d1db2988bcad028f0cda665d045bc036f65f18729180330c177bc272c293"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.544401 4943 generic.go:334] "Generic (PLEG): container finished" podID="ddf860a2-14da-473d-97e4-c5b2a28828c1" containerID="d0273aafe9f5fa699bd55ab8692c1fd7ce627b9436acfc698fe656b3380ded1e" exitCode=0 Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.544478 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" event={"ID":"ddf860a2-14da-473d-97e4-c5b2a28828c1","Type":"ContainerDied","Data":"d0273aafe9f5fa699bd55ab8692c1fd7ce627b9436acfc698fe656b3380ded1e"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.544512 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" event={"ID":"ddf860a2-14da-473d-97e4-c5b2a28828c1","Type":"ContainerStarted","Data":"0ef11a5da7cd3b254f7d8f3b9d91a71d038ba56a362ed72d1ab9ecd064eb75ad"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.549407 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" event={"ID":"b0fc5c3c-5c5c-4f45-8672-a097373973d0","Type":"ContainerStarted","Data":"c9d6a8da7ba75a1a3859dc5ff206c6888d7d36c8930c78cc8a59906c0a94a6a9"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.550464 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" event={"ID":"55652a9d-fd7e-4c49-993e-bf1eff1d57f9","Type":"ContainerStarted","Data":"478e0e9eae6f51214fb97a18269ac27724149ce49145c21882b6c92cee5130da"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.551394 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.553791 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" event={"ID":"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788","Type":"ContainerStarted","Data":"79ea1616a95f1a836ceddebd1c1055183015439fcf45ff41bee5d01b560f54e5"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.571003 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" event={"ID":"1c7844d3-bb90-4f62-8aa2-fe2b64f92343","Type":"ContainerStarted","Data":"786dd28d0703d7e7e933f33cf6f7bf1d211944484d939ef59722aec5776554e4"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.571597 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.582145 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-z2hf5" podStartSLOduration=120.582123096 podStartE2EDuration="2m0.582123096s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.569162285 +0000 UTC m=+141.499251058" watchObservedRunningTime="2025-11-29 06:36:06.582123096 +0000 UTC m=+141.512211849" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.589820 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.593492 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.092741207 +0000 UTC m=+142.022829960 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.593624 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.595234 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" podStartSLOduration=121.59522331 podStartE2EDuration="2m1.59522331s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.592486289 +0000 UTC m=+141.522575052" watchObservedRunningTime="2025-11-29 06:36:06.59522331 +0000 UTC m=+141.525312063" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.608281 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" event={"ID":"036ea3a8-bb27-4f18-9438-ed26ae51833c","Type":"ContainerStarted","Data":"63795ad113cf6d983d7c8354794aae86711dbf9a45df3961482fdfaa6dde9c9f"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.616984 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.621281 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" 
event={"ID":"3ac9cc53-448a-40b4-929e-5d13a9ebb883","Type":"ContainerStarted","Data":"ce7378f5acbf69058963024e24a5e382e0c59fd4867605c0b6cc665394b383ea"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.621335 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" event={"ID":"3ac9cc53-448a-40b4-929e-5d13a9ebb883","Type":"ContainerStarted","Data":"47f35d1ce219335c074256c8b016131501a4453a4a577a4defd417f5c301f226"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.624780 4943 generic.go:334] "Generic (PLEG): container finished" podID="9eddac8b-ec06-4ec0-9b84-541a6361a9fa" containerID="d440d017bf95ebc6ed155c52aafa1737199cd01227f9e1d543f822234effa083" exitCode=0 Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.625164 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" event={"ID":"9eddac8b-ec06-4ec0-9b84-541a6361a9fa","Type":"ContainerDied","Data":"d440d017bf95ebc6ed155c52aafa1737199cd01227f9e1d543f822234effa083"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.653078 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-m2skw" event={"ID":"8f722787-3b10-475b-a9ff-4c84d8de3d34","Type":"ContainerStarted","Data":"f4382ea1b7d06048f61d4bdc35283f5a3a412a700b9df8226dec33984a7635df"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.671743 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4txpk" event={"ID":"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f","Type":"ContainerStarted","Data":"00c3da0662835d43cadd454617d20ed8d26b231fdece9b38c2f871a29651d92e"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.686989 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" event={"ID":"7b841e1f-1dcf-48f3-97a1-88b401a0eee8","Type":"ContainerStarted","Data":"6ca7510aa01907b34a27deaadb3b3bb8ff498a8338f2cc9d4ef36f18d3b6aeb8"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.690076 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg" event={"ID":"2486e195-c853-4f92-a92b-8123812a01d5","Type":"ContainerStarted","Data":"f2091027c554a50ed8ef4473c5447d4d2338d88c5f26bf00faa8859854b99f4e"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.706541 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.709983 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.209967944 +0000 UTC m=+142.140056807 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.724289 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" event={"ID":"a9c30534-72b1-4990-bd05-1b9e0e7677c6","Type":"ContainerStarted","Data":"f2d81ead48405bddf8f652d73032b2a2cf57f7a06e24806f46b9f9b566adc69b"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.760596 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" podStartSLOduration=120.760556588 podStartE2EDuration="2m0.760556588s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.758132216 +0000 UTC m=+141.688220969" watchObservedRunningTime="2025-11-29 06:36:06.760556588 +0000 UTC m=+141.690645341" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.765316 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" event={"ID":"2bf596eb-1810-4ab2-9972-b0452b4c8e9a","Type":"ContainerStarted","Data":"68232dcc6bf35e31ec96ca783b35552e84aba3e1853411ada957367fbb02de86"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.803700 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-v52hz" event={"ID":"dab93fb0-80fd-4bb3-aa09-e95434e7354b","Type":"ContainerStarted","Data":"3c87b9cfa621774002d1a9eb1122d2f1fd4bee3c84df4314d31f22b6b67ecbdc"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.804423 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-v52hz" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.812755 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.813176 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.31315715 +0000 UTC m=+142.243245903 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.821050 4943 patch_prober.go:28] interesting pod/downloads-7954f5f757-v52hz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.821131 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-v52hz" podUID="dab93fb0-80fd-4bb3-aa09-e95434e7354b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.823189 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" event={"ID":"b98074c8-0098-4d89-899a-948d21ef3500","Type":"ContainerStarted","Data":"49ef39a5a3eddd81de330e00f80b1da0c6d4586fdc6cc5b0cbfad5ee2f26fac5"} Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.837539 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-z2b2d" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.908178 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dgt74" podStartSLOduration=120.908158745 podStartE2EDuration="2m0.908158745s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.872119219 +0000 UTC m=+141.802207972" watchObservedRunningTime="2025-11-29 06:36:06.908158745 +0000 UTC m=+141.838247498" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.908554 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-h65pr" podStartSLOduration=121.908545637 podStartE2EDuration="2m1.908545637s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.899989097 +0000 UTC m=+141.830077850" watchObservedRunningTime="2025-11-29 06:36:06.908545637 +0000 UTC m=+141.838634390" Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.916674 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:06 crc kubenswrapper[4943]: E1129 06:36:06.920097 4943 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.420082275 +0000 UTC m=+142.350171028 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:06 crc kubenswrapper[4943]: I1129 06:36:06.958721 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-v52hz" podStartSLOduration=121.958700808 podStartE2EDuration="2m1.958700808s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:06.956792232 +0000 UTC m=+141.886880985" watchObservedRunningTime="2025-11-29 06:36:06.958700808 +0000 UTC m=+141.888789561" Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.019165 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.019494 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.51947778 +0000 UTC m=+142.449566533 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.120690 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.121300 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.621272635 +0000 UTC m=+142.551361378 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.222214 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.222666 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.722647557 +0000 UTC m=+142.652736310 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.325595 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.326291 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.826275446 +0000 UTC m=+142.756364199 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.430070 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.430849 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:07.930818522 +0000 UTC m=+142.860907325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.531619 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.531928 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.031916596 +0000 UTC m=+142.962005349 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.638096 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.638441 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.138426329 +0000 UTC m=+143.068515082 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.742456 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.742918 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.242891743 +0000 UTC m=+143.172980496 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.843992 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.844589 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.344559024 +0000 UTC m=+143.274647777 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.910716 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-h4mrx" event={"ID":"09b45167-2db2-48c1-8776-4edc3ecb9ffb","Type":"ContainerStarted","Data":"4cc8d8d0d0b30c1baff116d552ce5e2eaa180b152d01bfdbf1f8edfe1495f694"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.932512 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-2v9bf" event={"ID":"642bec88-869e-42da-baa9-47e98f95ba17","Type":"ContainerStarted","Data":"70d98394db563ad67bd581b5493d7a07ef2e704f25aac05868fa8b4c85bd9e14"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.939599 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-h4mrx" podStartSLOduration=121.939556379 podStartE2EDuration="2m1.939556379s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:07.937755917 +0000 UTC m=+142.867844690" watchObservedRunningTime="2025-11-29 06:36:07.939556379 +0000 UTC m=+142.869645132" Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.945443 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:07 crc kubenswrapper[4943]: E1129 06:36:07.945897 4943 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.445879575 +0000 UTC m=+143.375968408 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.946840 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" event={"ID":"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b","Type":"ContainerStarted","Data":"510825bf47f56b1031661866144a34ec7502ec41a14cd82b90d014bd64004928"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.950342 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" event={"ID":"8405beb0-21cf-44f5-9979-3488c214d762","Type":"ContainerStarted","Data":"92782454388b659d15dc637790f1f7b4767678f46082ffcd920a4fb63dfb6ba1"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.959513 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jxf26" event={"ID":"1dec43e3-8363-43ca-a96b-6127086f75db","Type":"ContainerStarted","Data":"1557192a6d7fee4b0900b4925e7ff68190f8f79047ae88efec5e0e96b6edd04a"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.964147 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.969523 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" event={"ID":"69a44c9d-8342-4b54-a619-02abab39ecf6","Type":"ContainerStarted","Data":"f8bcede2dc4a87d870c3abe818c8fdb9a0ee811788eb1d6409ab8c9e3d29a9dd"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.970420 4943 patch_prober.go:28] interesting pod/router-default-5444994796-h4mrx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 06:36:07 crc kubenswrapper[4943]: [-]has-synced failed: reason withheld Nov 29 06:36:07 crc kubenswrapper[4943]: [+]process-running ok Nov 29 06:36:07 crc kubenswrapper[4943]: healthz check failed Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.970455 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h4mrx" podUID="09b45167-2db2-48c1-8776-4edc3ecb9ffb" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.974859 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" event={"ID":"bbc03c24-61f2-4009-9aa8-09a526122701","Type":"ContainerStarted","Data":"87bce36f1ba7584488f0defa5eee1e1ce3411f19bea217728bee6e66c6b7b21a"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.977148 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" event={"ID":"7b841e1f-1dcf-48f3-97a1-88b401a0eee8","Type":"ContainerStarted","Data":"d16ce1914458ec664cd66bdd150e73aa4fe5670383d73945a74ad6552781f6dc"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.979734 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" event={"ID":"c0469087-9878-428e-b24e-e1f31f6f8848","Type":"ContainerStarted","Data":"2276672296e18101fa0837b4dd7bd733b55442398aea727edcd43ba8f52f9799"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.982914 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" event={"ID":"cf402561-e803-41f4-9b0a-caf00944c023","Type":"ContainerStarted","Data":"7a394db76dfbf8a275c3d82e32de919211e773453d28085d3342af6edadd36e8"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.984352 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" event={"ID":"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788","Type":"ContainerStarted","Data":"260607b95ad38cc3fc26f9a9d4b35ed74aa8b3bd754547c119ffa5710b4a0c48"} Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.987898 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-2v9bf" podStartSLOduration=7.987883127 podStartE2EDuration="7.987883127s" podCreationTimestamp="2025-11-29 06:36:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:07.956358452 +0000 UTC m=+142.886447215" watchObservedRunningTime="2025-11-29 06:36:07.987883127 +0000 UTC m=+142.917971880" Nov 29 06:36:07 crc kubenswrapper[4943]: I1129 06:36:07.989677 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-prf24" podStartSLOduration=121.989667369 podStartE2EDuration="2m1.989667369s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:07.985670792 +0000 UTC m=+142.915759565" watchObservedRunningTime="2025-11-29 06:36:07.989667369 +0000 UTC m=+142.919756122" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.000240 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-m2skw" event={"ID":"8f722787-3b10-475b-a9ff-4c84d8de3d34","Type":"ContainerStarted","Data":"9f18f47f80abeab0f5c1cd857079d86badc25e8ee059e93d709d14a684347ce4"} Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.003053 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" event={"ID":"8584d24e-b420-4eca-8947-081c7d6c69c7","Type":"ContainerStarted","Data":"c3e2a2d4ec60f941e5ff6a3f0716cfc0f108e54026b873082d887a7274532eb3"} Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.004153 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-cqbvm" podStartSLOduration=123.004141833 podStartE2EDuration="2m3.004141833s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:08.003616418 +0000 UTC m=+142.933705181" watchObservedRunningTime="2025-11-29 06:36:08.004141833 +0000 UTC m=+142.934230586" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.005735 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" event={"ID":"036ea3a8-bb27-4f18-9438-ed26ae51833c","Type":"ContainerStarted","Data":"5e4ea65362d9026a2ae82d596140f0e6aff110fc35b96d5b7724950a3c733fcd"} Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.006267 4943 patch_prober.go:28] interesting pod/downloads-7954f5f757-v52hz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.006300 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-v52hz" podUID="dab93fb0-80fd-4bb3-aa09-e95434e7354b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.047102 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.047556 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.547529455 +0000 UTC m=+143.477618208 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.054668 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.055112 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.555097108 +0000 UTC m=+143.485185861 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.055721 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-jxf26" podStartSLOduration=123.055708075 podStartE2EDuration="2m3.055708075s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:08.032625639 +0000 UTC m=+142.962714402" watchObservedRunningTime="2025-11-29 06:36:08.055708075 +0000 UTC m=+142.985796828" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.056187 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vdxv4" podStartSLOduration=122.056180539 podStartE2EDuration="2m2.056180539s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:08.051128571 +0000 UTC m=+142.981217324" watchObservedRunningTime="2025-11-29 06:36:08.056180539 +0000 UTC m=+142.986269292" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.075776 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n9dn" podStartSLOduration=122.075754093 podStartE2EDuration="2m2.075754093s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:08.070642364 +0000 UTC m=+143.000731137" watchObservedRunningTime="2025-11-29 06:36:08.075754093 +0000 UTC m=+143.005842846" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.092219 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4nd4v" podStartSLOduration=123.092196895 podStartE2EDuration="2m3.092196895s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:08.088957161 +0000 UTC m=+143.019045914" watchObservedRunningTime="2025-11-29 06:36:08.092196895 +0000 UTC m=+143.022285648" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.116014 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.118017 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-m2skw" podStartSLOduration=8.117995242 podStartE2EDuration="8.117995242s" podCreationTimestamp="2025-11-29 06:36:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:08.115977793 +0000 UTC m=+143.046066556" 
watchObservedRunningTime="2025-11-29 06:36:08.117995242 +0000 UTC m=+143.048084005" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.157291 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.157990 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.657499671 +0000 UTC m=+143.587588424 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.158053 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.158775 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.658768008 +0000 UTC m=+143.588856751 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.260038 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.260388 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.760354686 +0000 UTC m=+143.690443439 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.261295 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.261650 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.761640084 +0000 UTC m=+143.691728837 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.363242 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.364105 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.864084228 +0000 UTC m=+143.794172991 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.466603 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.467298 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:08.967284884 +0000 UTC m=+143.897373647 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.568912 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.569306 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.069287955 +0000 UTC m=+143.999376708 (durationBeforeRetry 500ms). 
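[Editor's note] The cadence here is worth reading off the timestamps: the volume reconciler re-queues the mount and unmount operations on roughly a 100ms sweep (.047, .157, .260, .363, .568 ...), but each failure stamps a "No retries permitted until <now+500ms> (durationBeforeRetry 500ms)" deadline, so most sweeps are rejected without calling the CSI driver again. A small Go sketch of that gating pattern; the names are hypothetical and this is not the kubelet's actual nestedpendingoperations.go implementation:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryGate mimics the "No retries permitted until ..." behavior in the
// log: after a failure, further attempts are rejected until the deadline.
type retryGate struct {
	notBefore time.Time
	delay     time.Duration // durationBeforeRetry, 500ms in this log
}

func (g *retryGate) run(op func() error) error {
	if now := time.Now(); now.Before(g.notBefore) {
		return fmt.Errorf("no retries permitted until %s", g.notBefore)
	}
	if err := op(); err != nil {
		g.notBefore = time.Now().Add(g.delay)
		return err
	}
	return nil
}

func main() {
	gate := &retryGate{delay: 500 * time.Millisecond}
	mountDevice := func() error {
		return errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")
	}
	// The reconciler sweeps more often than the retry delay, so most
	// sweeps are rejected by the gate, as in the log above.
	for i := 0; i < 3; i++ {
		fmt.Println(gate.run(mountDevice))
		time.Sleep(200 * time.Millisecond)
	}
}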
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.678937 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.679694 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.179681382 +0000 UTC m=+144.109770135 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.781456 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.782020 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.282000043 +0000 UTC m=+144.212088796 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.883250 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.883706 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.383684194 +0000 UTC m=+144.313773017 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.967973 4943 patch_prober.go:28] interesting pod/router-default-5444994796-h4mrx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 06:36:08 crc kubenswrapper[4943]: [-]has-synced failed: reason withheld Nov 29 06:36:08 crc kubenswrapper[4943]: [+]process-running ok Nov 29 06:36:08 crc kubenswrapper[4943]: healthz check failed Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.968056 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h4mrx" podUID="09b45167-2db2-48c1-8776-4edc3ecb9ffb" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.985045 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.985201 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.48517644 +0000 UTC m=+144.415265193 (durationBeforeRetry 500ms). 
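[Editor's note] The router-default startup-probe records above show how the kubelet evaluates an HTTP probe: a response code in the 200-399 range is success, anything else (here 500) is failure, and the beginning of the response body is kept for logging, which is where the healthz check list ("[-]backend-http failed: reason withheld ... [+]process-running ok") comes from. A self-contained sketch of that check; the URL and the 1 KiB body limit are illustrative assumptions:

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// httpProbe performs one kubelet-style HTTP probe: 2xx/3xx is success,
// anything else is failure, and a truncated body is kept for logging.
func httpProbe(url string) (ok bool, status int, startOfBody string) {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return false, 0, err.Error()
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024)) // keep only the start
	ok = resp.StatusCode >= 200 && resp.StatusCode < 400
	return ok, resp.StatusCode, string(body)
}

func main() {
	// Illustrative endpoint; the router's actual healthz port is not
	// shown in this log, so this URL is an assumption.
	ok, status, body := httpProbe("http://127.0.0.1:1936/healthz")
	if !ok {
		fmt.Printf("HTTP probe failed with statuscode: %d\nstart-of-body=%s\n", status, body)
	}
}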
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:08 crc kubenswrapper[4943]: I1129 06:36:08.985407 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:08 crc kubenswrapper[4943]: E1129 06:36:08.985839 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.485822589 +0000 UTC m=+144.415911342 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.072183 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf" event={"ID":"655ce663-9a0e-4ce9-bdaf-e614234ac533","Type":"ContainerStarted","Data":"f544b3fb1a4534eba513eb833077b8f0b2ca45ec9a7311d36921f4e36cbfb947"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.086045 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.086344 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.586329136 +0000 UTC m=+144.516417889 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.095232 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" event={"ID":"99933cfb-96e8-4fc3-a9aa-291b306760f6","Type":"ContainerStarted","Data":"1e62f15c8bbbbdc7c40603d717d94c7bbc6610344d025d03848c152d0c7e7335"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.104736 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4txpk" event={"ID":"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f","Type":"ContainerStarted","Data":"45447d8ef9d71c03847e1c00039fc74759e6ecd7f3d149498f206653b45bd201"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.105364 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-4txpk" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.125933 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" event={"ID":"a9c30534-72b1-4990-bd05-1b9e0e7677c6","Type":"ContainerStarted","Data":"c29538129961fe6a9a4f09c5721e5d85070f76750a585cbadb8fe5f15bd4835b"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.126634 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.140105 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-7mcpf" podStartSLOduration=123.140089923 podStartE2EDuration="2m3.140089923s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.126058152 +0000 UTC m=+144.056146905" watchObservedRunningTime="2025-11-29 06:36:09.140089923 +0000 UTC m=+144.070178666" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.141608 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" event={"ID":"7b841e1f-1dcf-48f3-97a1-88b401a0eee8","Type":"ContainerStarted","Data":"85fec0e4e97822ca822a08f0c0f32590bf0562659aa5a8b21f4af8bba25dffd0"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.149931 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.151966 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" event={"ID":"9eddac8b-ec06-4ec0-9b84-541a6361a9fa","Type":"ContainerStarted","Data":"43fd7194452f94ef9752abef0c0341c514eb5e09da23ed2ec25f4acb779056b1"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.187445 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.188931 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" event={"ID":"c6d3fa52-5b2c-45c0-bb7b-8f5c7bade788","Type":"ContainerStarted","Data":"6dd6335a1dac2294d9af1372c31e6d3e04fb24dd20f4bc410157c62e7c2c6387"} Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.189442 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.68942756 +0000 UTC m=+144.619516323 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.189736 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.211054 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5sg2" podStartSLOduration=124.211027113 podStartE2EDuration="2m4.211027113s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.201351129 +0000 UTC m=+144.131439882" watchObservedRunningTime="2025-11-29 06:36:09.211027113 +0000 UTC m=+144.141115866" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.211207 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" event={"ID":"8584d24e-b420-4eca-8947-081c7d6c69c7","Type":"ContainerStarted","Data":"302405ed82f4db2a6f99b24e87b03687cdb12c23e1cab8910304e1e78c556294"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.224231 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" event={"ID":"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e","Type":"ContainerStarted","Data":"1ff57e703691460d6a6e375ae060f95f56c9342db648e53578930317f2f53d44"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.244265 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-4txpk" podStartSLOduration=9.244247057 podStartE2EDuration="9.244247057s" podCreationTimestamp="2025-11-29 06:36:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.244173565 +0000 UTC m=+144.174262328" watchObservedRunningTime="2025-11-29 06:36:09.244247057 +0000 UTC m=+144.174335800" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.254016 4943 
generic.go:334] "Generic (PLEG): container finished" podID="346fc9d4-63c9-4c45-833c-b45822a13b4b" containerID="3eadcfbe8949e6260195918712f4b1c850df95c08220da629022b58f719b7abf" exitCode=0 Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.254133 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" event={"ID":"346fc9d4-63c9-4c45-833c-b45822a13b4b","Type":"ContainerDied","Data":"3eadcfbe8949e6260195918712f4b1c850df95c08220da629022b58f719b7abf"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.272256 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" event={"ID":"ddf860a2-14da-473d-97e4-c5b2a28828c1","Type":"ContainerStarted","Data":"f0ad1472b80a675e3466a864635a64001881f9819641595d215cfa7c24cd421b"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.279701 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" event={"ID":"b143e5c3-54ec-40d9-9c11-690cf321df9f","Type":"ContainerStarted","Data":"051624975b58b473f64ada6ab33192bb4aabaadb34e364f5d979362e71f5190b"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.280840 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.281922 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" event={"ID":"2bf596eb-1810-4ab2-9972-b0452b4c8e9a","Type":"ContainerStarted","Data":"91b994c94d0eef2c6c390d0258fc5a10d9f5ea8badf7131e6f7aa6e0ada01e57"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.287280 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8rjvk" podStartSLOduration=123.287264569 podStartE2EDuration="2m3.287264569s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.285125755 +0000 UTC m=+144.215214518" watchObservedRunningTime="2025-11-29 06:36:09.287264569 +0000 UTC m=+144.217353312" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.288343 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.288687 4943 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-gxzdk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.288740 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" podUID="b143e5c3-54ec-40d9-9c11-690cf321df9f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.289232 4943 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.789213405 +0000 UTC m=+144.719302158 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.293164 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" event={"ID":"4af90184-bb3b-455d-a9dc-9e120c08b3c7","Type":"ContainerStarted","Data":"540021f999e4c8d4aadddf42db9b9abca345e7a35b470e82498ca1339e2b2120"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.308049 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" event={"ID":"adebb31c-a7f3-48e5-b82b-fc44b9f5c05b","Type":"ContainerStarted","Data":"ce56fa89c5fabcda0516e3bb6054fd5ccfe431221ba14f38bc19c520d34ee95f"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.313381 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" event={"ID":"d20623eb-726b-4684-a8f4-3358d6a1c7fd","Type":"ContainerStarted","Data":"476aed17444812151ec51c85dc4b9124421bef5492b113b082b361db36e9b7f3"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.313882 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.319244 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" event={"ID":"2c5563de-5269-435a-8e95-168e43531454","Type":"ContainerStarted","Data":"ca3a60d263968aa66a45f2fa66e435533c450713e513efabfaee5c5b162847a7"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.351838 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg" event={"ID":"2486e195-c853-4f92-a92b-8123812a01d5","Type":"ContainerStarted","Data":"62e78762c6bb4954c07b77ff9dc9ab9a73f36563edfbbb65ab0a34ade3cb9319"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.351878 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg" event={"ID":"2486e195-c853-4f92-a92b-8123812a01d5","Type":"ContainerStarted","Data":"1640abba6fe4580999a114faa3fcdfdf0be59a9f53a4d63e2c6b571fa736394a"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.385182 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" event={"ID":"b0fc5c3c-5c5c-4f45-8672-a097373973d0","Type":"ContainerStarted","Data":"932f80ceb9fd55fad128a37a81a20b4935a7b431b7ff8b2c702560c1602ddda1"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.395611 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.396960 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.896942244 +0000 UTC m=+144.827031097 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.412947 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" event={"ID":"cf402561-e803-41f4-9b0a-caf00944c023","Type":"ContainerStarted","Data":"901f8ea5a7953fe08874c6ffd482381bd3c5d2cba373e24702cda95dfc942710"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.413005 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" event={"ID":"cf402561-e803-41f4-9b0a-caf00944c023","Type":"ContainerStarted","Data":"bbde49908a51bde42b831bf6caaf759a76dfe7e100f6c53d1c335666021079e2"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.437347 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" event={"ID":"c016108a-e89e-4bf3-97b2-fb9344746be8","Type":"ContainerStarted","Data":"f61920720eb5ce33936ec6c12e98a2e9b5ed4b80f163cd649e2411f5822cc7ee"} Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.437773 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.441169 4943 patch_prober.go:28] interesting pod/downloads-7954f5f757-v52hz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.441221 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-v52hz" podUID="dab93fb0-80fd-4bb3-aa09-e95434e7354b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.449256 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-282cm" podStartSLOduration=123.449235777 podStartE2EDuration="2m3.449235777s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-29 06:36:09.324103519 +0000 UTC m=+144.254192272" watchObservedRunningTime="2025-11-29 06:36:09.449235777 +0000 UTC m=+144.379324530" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.473843 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" podStartSLOduration=123.473820529 podStartE2EDuration="2m3.473820529s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.451065922 +0000 UTC m=+144.381154675" watchObservedRunningTime="2025-11-29 06:36:09.473820529 +0000 UTC m=+144.403909292" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.497188 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.499062 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:09.999043148 +0000 UTC m=+144.929131901 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.537982 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.582093 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" podStartSLOduration=123.582076553 podStartE2EDuration="2m3.582076553s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.533047455 +0000 UTC m=+144.463136218" watchObservedRunningTime="2025-11-29 06:36:09.582076553 +0000 UTC m=+144.512165316" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.595108 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bnkx" podStartSLOduration=123.595084004 podStartE2EDuration="2m3.595084004s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.585983348 +0000 UTC m=+144.516072131" watchObservedRunningTime="2025-11-29 06:36:09.595084004 +0000 UTC m=+144.525172767" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.603994 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.604479 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.104462539 +0000 UTC m=+145.034551292 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.680961 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-x5kzr" podStartSLOduration=124.680941122 podStartE2EDuration="2m4.680941122s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.679391827 +0000 UTC m=+144.609480600" watchObservedRunningTime="2025-11-29 06:36:09.680941122 +0000 UTC m=+144.611029875" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.682073 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs" podStartSLOduration=123.682066745 podStartE2EDuration="2m3.682066745s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.639635741 +0000 UTC m=+144.569724514" watchObservedRunningTime="2025-11-29 06:36:09.682066745 +0000 UTC m=+144.612155518" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.712983 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.713480 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.213460686 +0000 UTC m=+145.143549439 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.793462 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sdpcj" podStartSLOduration=123.793442901 podStartE2EDuration="2m3.793442901s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.790182896 +0000 UTC m=+144.720271649" watchObservedRunningTime="2025-11-29 06:36:09.793442901 +0000 UTC m=+144.723531654" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.815088 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.815475 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.315459727 +0000 UTC m=+145.245548480 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.909994 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" podStartSLOduration=123.909970958 podStartE2EDuration="2m3.909970958s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.846860857 +0000 UTC m=+144.776949630" watchObservedRunningTime="2025-11-29 06:36:09.909970958 +0000 UTC m=+144.840059721" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.919035 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:09 crc kubenswrapper[4943]: E1129 06:36:09.919588 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.419548149 +0000 UTC m=+145.349636902 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.948411 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rc66c" podStartSLOduration=123.948387895 podStartE2EDuration="2m3.948387895s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.947601631 +0000 UTC m=+144.877690404" watchObservedRunningTime="2025-11-29 06:36:09.948387895 +0000 UTC m=+144.878476648" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.949210 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6dflg" podStartSLOduration=123.949204478 podStartE2EDuration="2m3.949204478s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:09.911653138 +0000 UTC m=+144.841741901" watchObservedRunningTime="2025-11-29 06:36:09.949204478 +0000 UTC m=+144.879293231" Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.969867 4943 patch_prober.go:28] interesting pod/router-default-5444994796-h4mrx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 06:36:09 crc kubenswrapper[4943]: [-]has-synced failed: reason withheld Nov 29 06:36:09 crc kubenswrapper[4943]: [+]process-running ok Nov 29 06:36:09 crc kubenswrapper[4943]: healthz check failed Nov 29 06:36:09 crc kubenswrapper[4943]: I1129 06:36:09.969920 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h4mrx" podUID="09b45167-2db2-48c1-8776-4edc3ecb9ffb" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.002388 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" podStartSLOduration=124.002370747 podStartE2EDuration="2m4.002370747s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:10.002324396 +0000 UTC m=+144.932413169" watchObservedRunningTime="2025-11-29 06:36:10.002370747 +0000 UTC m=+144.932459500" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.020338 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:10 
crc kubenswrapper[4943]: E1129 06:36:10.020759 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.520744536 +0000 UTC m=+145.450833299 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.121618 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.122023 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.621995435 +0000 UTC m=+145.552084198 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.122346 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.122805 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.622789408 +0000 UTC m=+145.552878161 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.223555 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.223754 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.723722848 +0000 UTC m=+145.653811611 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.224010 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.224407 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.724395558 +0000 UTC m=+145.654484361 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.270824 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" podStartSLOduration=124.270799058 podStartE2EDuration="2m4.270799058s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:10.075278985 +0000 UTC m=+145.005367758" watchObservedRunningTime="2025-11-29 06:36:10.270799058 +0000 UTC m=+145.200887811" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.314189 4943 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wzqp2 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.314322 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" podUID="d20623eb-726b-4684-a8f4-3358d6a1c7fd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.41:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.324779 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.324966 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.824937746 +0000 UTC m=+145.755026499 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.325183 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.325546 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.825534223 +0000 UTC m=+145.755622976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.348459 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9shcm" podStartSLOduration=125.348441196 podStartE2EDuration="2m5.348441196s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:10.271772107 +0000 UTC m=+145.201860880" watchObservedRunningTime="2025-11-29 06:36:10.348441196 +0000 UTC m=+145.278529949" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.425906 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.426232 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:10.926217066 +0000 UTC m=+145.856305819 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.450353 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4txpk" event={"ID":"a75b0153-e2d9-4bd7-87f3-fa6c0bebe67f","Type":"ContainerStarted","Data":"458c8a7c9f6d193eb42b50b5c6759999009233c70f3c7070ea6d55d20a719570"} Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.452848 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" event={"ID":"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e","Type":"ContainerStarted","Data":"f1c054f65b762864e4e535e3b7f147be0dec7ba95a9ce469d12e0180e7e7992b"} Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.454473 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" event={"ID":"346fc9d4-63c9-4c45-833c-b45822a13b4b","Type":"ContainerStarted","Data":"f395927c3a75e38d51945a69c2590c1056e0a54d915456c305b3dcf50fd682c6"} Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.455186 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.457340 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" event={"ID":"ddf860a2-14da-473d-97e4-c5b2a28828c1","Type":"ContainerStarted","Data":"f56fbc06663270ea7d0af477d66d74d6778d66025efa733cf64d83f28f464019"} Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.460102 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-48qmb" event={"ID":"b0fc5c3c-5c5c-4f45-8672-a097373973d0","Type":"ContainerStarted","Data":"b3097ec1fd4021cc836e80a7c71894f9fff68fea5d0a132d455de96f1060e61a"} Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.461799 4943 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-gxzdk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.461840 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" podUID="b143e5c3-54ec-40d9-9c11-690cf321df9f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.513439 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wjfps" podStartSLOduration=124.513420823 podStartE2EDuration="2m4.513420823s" podCreationTimestamp="2025-11-29 06:34:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:10.349077904 +0000 UTC 
m=+145.279166657" watchObservedRunningTime="2025-11-29 06:36:10.513420823 +0000 UTC m=+145.443509596" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.527291 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.533495 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.033482162 +0000 UTC m=+145.963570915 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.628432 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.628621 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.12859143 +0000 UTC m=+146.058680193 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.628826 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.629224 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.129213988 +0000 UTC m=+146.059302821 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.646492 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" podStartSLOduration=125.646468524 podStartE2EDuration="2m5.646468524s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:10.515872155 +0000 UTC m=+145.445960918" watchObservedRunningTime="2025-11-29 06:36:10.646468524 +0000 UTC m=+145.576557277" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.736298 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.736811 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.236797053 +0000 UTC m=+146.166885806 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.796725 4943 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.839463 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.839840 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.339829234 +0000 UTC m=+146.269917987 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.858731 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" podStartSLOduration=125.858713488 podStartE2EDuration="2m5.858713488s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:10.644916378 +0000 UTC m=+145.575005141" watchObservedRunningTime="2025-11-29 06:36:10.858713488 +0000 UTC m=+145.788802241" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.860364 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vxdd2"] Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.861439 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.865483 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.886366 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vxdd2"] Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.942101 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.942712 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-catalog-content\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.942824 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bt9f5\" (UniqueName: \"kubernetes.io/projected/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-kube-api-access-bt9f5\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.942861 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-utilities\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:10 crc kubenswrapper[4943]: E1129 06:36:10.942989 4943 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.442969378 +0000 UTC m=+146.373058141 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.966512 4943 patch_prober.go:28] interesting pod/router-default-5444994796-h4mrx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 06:36:10 crc kubenswrapper[4943]: [-]has-synced failed: reason withheld Nov 29 06:36:10 crc kubenswrapper[4943]: [+]process-running ok Nov 29 06:36:10 crc kubenswrapper[4943]: healthz check failed Nov 29 06:36:10 crc kubenswrapper[4943]: I1129 06:36:10.966600 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h4mrx" podUID="09b45167-2db2-48c1-8776-4edc3ecb9ffb" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.025703 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sr58d"] Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.026652 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.028307 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.038459 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sr58d"] Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.043766 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bt9f5\" (UniqueName: \"kubernetes.io/projected/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-kube-api-access-bt9f5\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.043816 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-utilities\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.043913 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-catalog-content\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.043982 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.044323 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.544307131 +0000 UTC m=+146.474395894 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.045040 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-utilities\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.045235 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-catalog-content\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.081256 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bt9f5\" (UniqueName: \"kubernetes.io/projected/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-kube-api-access-bt9f5\") pod \"community-operators-vxdd2\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.144781 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.144994 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.644964031 +0000 UTC m=+146.575052784 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.145197 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frcnn\" (UniqueName: \"kubernetes.io/projected/8911d577-ec61-4e6d-96e1-c51ee6b5477e-kube-api-access-frcnn\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.145260 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-utilities\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.145356 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.145392 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-catalog-content\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.145745 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.645734394 +0000 UTC m=+146.575823237 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.189286 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.241548 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mxcdj"] Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.242596 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.246177 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.246295 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.746276183 +0000 UTC m=+146.676364936 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.246498 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frcnn\" (UniqueName: \"kubernetes.io/projected/8911d577-ec61-4e6d-96e1-c51ee6b5477e-kube-api-access-frcnn\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.246925 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-utilities\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.246962 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-utilities\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.247023 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.247043 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-catalog-content\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.247289 4943 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.747282222 +0000 UTC m=+146.677370975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.247477 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-catalog-content\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.255930 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mxcdj"] Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.269714 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frcnn\" (UniqueName: \"kubernetes.io/projected/8911d577-ec61-4e6d-96e1-c51ee6b5477e-kube-api-access-frcnn\") pod \"certified-operators-sr58d\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.342662 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.348343 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.348546 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.848515631 +0000 UTC m=+146.778604384 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.348722 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fs7q\" (UniqueName: \"kubernetes.io/projected/9a026553-a5c9-47ea-bce1-4aa730f8f516-kube-api-access-2fs7q\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.348764 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.348852 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-utilities\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.348879 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-catalog-content\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.349158 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.849147679 +0000 UTC m=+146.779236512 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.427821 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-knbjh"] Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.429265 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.439656 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-knbjh"] Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.450486 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.450714 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.950680886 +0000 UTC m=+146.880769649 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.450812 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fs7q\" (UniqueName: \"kubernetes.io/projected/9a026553-a5c9-47ea-bce1-4aa730f8f516-kube-api-access-2fs7q\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.450851 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.450897 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-utilities\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.450920 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-catalog-content\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.451288 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 06:36:11.951272163 +0000 UTC m=+146.881360966 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vm9wf" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.451394 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-catalog-content\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.451610 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-utilities\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.464694 4943 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wzqp2 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.464763 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" podUID="d20623eb-726b-4684-a8f4-3358d6a1c7fd" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.41:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.499448 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fs7q\" (UniqueName: \"kubernetes.io/projected/9a026553-a5c9-47ea-bce1-4aa730f8f516-kube-api-access-2fs7q\") pod \"community-operators-mxcdj\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.552544 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.552789 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-catalog-content\") pod \"certified-operators-knbjh\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.552825 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-utilities\") pod \"certified-operators-knbjh\" (UID: 
\"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.553684 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8vkp\" (UniqueName: \"kubernetes.io/projected/6220f73c-eb92-4998-b58c-3d6faae45361-kube-api-access-x8vkp\") pod \"certified-operators-knbjh\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: E1129 06:36:11.553754 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 06:36:12.053730878 +0000 UTC m=+146.983819641 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.556894 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.586825 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" event={"ID":"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e","Type":"ContainerStarted","Data":"f4ea11ceaa938768f025e3ece963cc840b3f5acaa382a9cc5f973d7494b37fac"} Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.586865 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" event={"ID":"b4996ee7-155c-4aaa-bffa-d0e2dcaa863e","Type":"ContainerStarted","Data":"4a2867bf5f344bce163fc17cb63e9d64a67f4d7eb8d41b983b8097f6df389ae1"} Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.588308 4943 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-gxzdk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.588340 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" podUID="b143e5c3-54ec-40d9-9c11-690cf321df9f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.612125 4943 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-29T06:36:10.796762812Z","Handler":null,"Name":""} Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.638804 4943 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 29 06:36:11 crc 
kubenswrapper[4943]: I1129 06:36:11.638847 4943 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.657349 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.657434 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8vkp\" (UniqueName: \"kubernetes.io/projected/6220f73c-eb92-4998-b58c-3d6faae45361-kube-api-access-x8vkp\") pod \"certified-operators-knbjh\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.657594 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-catalog-content\") pod \"certified-operators-knbjh\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.657671 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-utilities\") pod \"certified-operators-knbjh\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.659087 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-utilities\") pod \"certified-operators-knbjh\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.659300 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-catalog-content\") pod \"certified-operators-knbjh\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.670776 4943 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
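
Note on the sequence above: it is a driver-registration race visible end to end. Every MountVolume.MountDevice and UnmountVolume.TearDown attempt for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 fails with "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers", and nestedpendingoperations gates each failed operation from retrying for 500ms ("No retries permitted until ... durationBeforeRetry 500ms"); once the plugin watcher picks up /var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock and csi_plugin.go registers the driver, the pending mount goes through on the next retry. The following is a minimal Go sketch of that gate-and-registry pattern, illustrative only and not kubelet source; the names driverRegistry, pendingOp, and tryMount are invented here:

    // retrygate.go - a minimal sketch (not kubelet code) of the pattern in the
    // log entries above: a failed operation is barred from retrying until
    // now+backoff, and a CSI mount can only proceed once the driver name
    // appears in the registry of registered plugins.
    package main

    import (
        "errors"
        "fmt"
        "sync"
        "time"
    )

    // driverRegistry stands in for the kubelet's list of registered CSI drivers.
    type driverRegistry struct {
        mu      sync.Mutex
        drivers map[string]bool
    }

    func (r *driverRegistry) register(name string) {
        r.mu.Lock()
        defer r.mu.Unlock()
        r.drivers[name] = true
    }

    func (r *driverRegistry) lookup(name string) bool {
        r.mu.Lock()
        defer r.mu.Unlock()
        return r.drivers[name]
    }

    // pendingOp gates retries the way the "durationBeforeRetry 500ms" lines do.
    type pendingOp struct {
        notBefore time.Time
    }

    func (op *pendingOp) tryMount(reg *driverRegistry, driver string, backoff time.Duration) error {
        if time.Now().Before(op.notBefore) {
            return fmt.Errorf("no retries permitted until %s", op.notBefore.Format(time.RFC3339Nano))
        }
        if !reg.lookup(driver) {
            // Bar further retries for the backoff window, as nestedpendingoperations does.
            op.notBefore = time.Now().Add(backoff)
            return errors.New("driver name " + driver + " not found in the list of registered CSI drivers")
        }
        return nil // MountDevice / SetUp would proceed from here
    }

    func main() {
        reg := &driverRegistry{drivers: map[string]bool{}}
        op := &pendingOp{}

        // First attempt fails exactly like the E1129 entries: driver not yet registered.
        fmt.Println(op.tryMount(reg, "kubevirt.io.hostpath-provisioner", 500*time.Millisecond))

        // The plugin watcher finds the registration socket and the driver is
        // registered, mirroring the csi_plugin.go "Register new plugin" entry.
        reg.register("kubevirt.io.hostpath-provisioner")

        time.Sleep(500 * time.Millisecond) // wait out the retry gate
        fmt.Println(op.tryMount(reg, "kubevirt.io.hostpath-provisioner", 500*time.Millisecond)) // <nil>
    }

Once register() runs and the notBefore deadline passes, the next attempt succeeds, which is the shape of the transition from the repeated E1129 failures to "MountVolume.MountDevice succeeded" at 06:36:11.670820 in the entries that follow.
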
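A second note, on the router startup-probe failures interleaved through this window: the probe output quotes a Kubernetes-style aggregated healthz body, one "[+]name ok" or "[-]name failed" line per check plus a trailing "healthz check failed", served with HTTP 500 whenever any check fails, which is what the prober records as probeResult="failure" with statuscode 500. A short, self-contained Go sketch of a handler producing that output shape follows; it is an assumed illustration, not the OpenShift router's actual code, and the port and failing checks are placeholders chosen to match the log:

    // healthzsketch.go - a minimal sketch (assumed, not router source) of the
    // aggregated health output quoted in the startup-probe failures above.
    package main

    import (
        "fmt"
        "net/http"
    )

    type check struct {
        name string
        fn   func() error
    }

    // healthzHandler reports each check as [+]ok or [-]failed and returns
    // HTTP 500 when any check fails, so the kubelet prober sees statuscode 500.
    func healthzHandler(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            body := ""
            failed := false
            for _, c := range checks {
                if err := c.fn(); err != nil {
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
                    failed = true
                } else {
                    body += fmt.Sprintf("[+]%s ok\n", c.name)
                }
            }
            if failed {
                body += "healthz check failed\n"
                w.WriteHeader(http.StatusInternalServerError)
            }
            fmt.Fprint(w, body)
        }
    }

    func main() {
        checks := []check{
            // backend-http and has-synced failing, process-running ok, as in the log.
            {"backend-http", func() error { return fmt.Errorf("backend not ready") }},
            {"has-synced", func() error { return fmt.Errorf("not synced") }},
            {"process-running", func() error { return nil }},
        }
        http.HandleFunc("/healthz", healthzHandler(checks))
        http.ListenAndServe(":1936", nil) // port is an arbitrary assumption for the sketch
    }

Probing /healthz on this sketch returns 500 with the bracketed per-check lines until the failing checks clear, after which it returns 200 and the startup probe passes.
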
Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.670820 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.721760 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vxdd2"] Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.726486 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8vkp\" (UniqueName: \"kubernetes.io/projected/6220f73c-eb92-4998-b58c-3d6faae45361-kube-api-access-x8vkp\") pod \"certified-operators-knbjh\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.753456 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.775627 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vm9wf\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") " pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.807493 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sr58d"] Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.862279 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.916077 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.934902 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.972372 4943 patch_prober.go:28] interesting pod/router-default-5444994796-h4mrx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 06:36:11 crc kubenswrapper[4943]: [-]has-synced failed: reason withheld Nov 29 06:36:11 crc kubenswrapper[4943]: [+]process-running ok Nov 29 06:36:11 crc kubenswrapper[4943]: healthz check failed Nov 29 06:36:11 crc kubenswrapper[4943]: I1129 06:36:11.972715 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h4mrx" podUID="09b45167-2db2-48c1-8776-4edc3ecb9ffb" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.042775 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mxcdj"] Nov 29 06:36:12 crc kubenswrapper[4943]: W1129 06:36:12.066811 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a026553_a5c9_47ea_bce1_4aa730f8f516.slice/crio-8865ecc97922b5b436a34893516f4c2b7fd4b954ea427000efc81ffb0fc44a5a WatchSource:0}: Error finding container 8865ecc97922b5b436a34893516f4c2b7fd4b954ea427000efc81ffb0fc44a5a: Status 404 returned error can't find the container with id 8865ecc97922b5b436a34893516f4c2b7fd4b954ea427000efc81ffb0fc44a5a Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.236316 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-knbjh"] Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.306290 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vm9wf"] Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.524354 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.525184 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.527044 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.527422 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.533401 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.589827 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jwr2w" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.605138 4943 generic.go:334] "Generic (PLEG): container finished" podID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerID="e3413492f790a89481d36af5e84f44edbf6c556b0ffa9576b5452eaa4d803b78" exitCode=0 Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.605217 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sr58d" event={"ID":"8911d577-ec61-4e6d-96e1-c51ee6b5477e","Type":"ContainerDied","Data":"e3413492f790a89481d36af5e84f44edbf6c556b0ffa9576b5452eaa4d803b78"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.605253 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sr58d" event={"ID":"8911d577-ec61-4e6d-96e1-c51ee6b5477e","Type":"ContainerStarted","Data":"54d03b8db7ba57d012b0b4bda2083c5a17380c358a7a60dca6eb9e2ee213c503"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.607624 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.608454 4943 generic.go:334] "Generic (PLEG): container finished" podID="6220f73c-eb92-4998-b58c-3d6faae45361" containerID="873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9" exitCode=0 Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.608494 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-knbjh" event={"ID":"6220f73c-eb92-4998-b58c-3d6faae45361","Type":"ContainerDied","Data":"873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.608512 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-knbjh" event={"ID":"6220f73c-eb92-4998-b58c-3d6faae45361","Type":"ContainerStarted","Data":"3cd74aa6987c7d8ec545405717f4462192e87b21d4480f82e63dbb2195904b0d"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.616179 4943 generic.go:334] "Generic (PLEG): container finished" podID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerID="40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd" exitCode=0 Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.616294 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mxcdj" event={"ID":"9a026553-a5c9-47ea-bce1-4aa730f8f516","Type":"ContainerDied","Data":"40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.616325 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-mxcdj" event={"ID":"9a026553-a5c9-47ea-bce1-4aa730f8f516","Type":"ContainerStarted","Data":"8865ecc97922b5b436a34893516f4c2b7fd4b954ea427000efc81ffb0fc44a5a"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.635076 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" event={"ID":"3049b3ed-f405-4ecc-ade1-ad9753e53c1d","Type":"ContainerStarted","Data":"67ff8095ff1209486ab413dd7a6f7e39b292e752542190eed375a2de4032a3a0"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.635117 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" event={"ID":"3049b3ed-f405-4ecc-ade1-ad9753e53c1d","Type":"ContainerStarted","Data":"63c0a4936cad362f4d9a73d6c824c5a930b46bbb3ab9c27afd75c3716ece2388"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.635751 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.645805 4943 generic.go:334] "Generic (PLEG): container finished" podID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerID="6c00f555cf3311aaaccff80347322bf1abd8e9658989cab55d5828c7dbeea95c" exitCode=0 Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.646910 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdd2" event={"ID":"04357fd3-79d7-4a5c-b4ba-01e1ff2face4","Type":"ContainerDied","Data":"6c00f555cf3311aaaccff80347322bf1abd8e9658989cab55d5828c7dbeea95c"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.646998 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdd2" event={"ID":"04357fd3-79d7-4a5c-b4ba-01e1ff2face4","Type":"ContainerStarted","Data":"e88c543c8c06285086c75b14e5b8d08e3cbe597695c47e2f871024ca39d865cb"} Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.683995 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.684238 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.707945 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-fzrkw" podStartSLOduration=12.707928873 podStartE2EDuration="12.707928873s" podCreationTimestamp="2025-11-29 06:36:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:12.706263844 +0000 UTC m=+147.636352597" watchObservedRunningTime="2025-11-29 06:36:12.707928873 +0000 UTC m=+147.638017626" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.747556 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" podStartSLOduration=127.747537564 podStartE2EDuration="2m7.747537564s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:12.746667078 +0000 UTC m=+147.676755841" watchObservedRunningTime="2025-11-29 06:36:12.747537564 +0000 UTC m=+147.677626317" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.786061 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.786191 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.788092 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.813304 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.832934 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6rz7x"] Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.851208 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.858394 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.859137 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rz7x"] Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.893961 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.945130 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.945511 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.960754 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.969630 4943 patch_prober.go:28] interesting pod/router-default-5444994796-h4mrx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 06:36:12 crc kubenswrapper[4943]: [-]has-synced failed: reason withheld Nov 29 06:36:12 crc kubenswrapper[4943]: [+]process-running ok Nov 29 06:36:12 crc kubenswrapper[4943]: healthz check failed Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.970037 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h4mrx" podUID="09b45167-2db2-48c1-8776-4edc3ecb9ffb" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.989908 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-utilities\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.990214 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-catalog-content\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:12 crc kubenswrapper[4943]: I1129 06:36:12.990422 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d656k\" (UniqueName: \"kubernetes.io/projected/3b60b6a4-d67c-4450-b5f3-58d3124be789-kube-api-access-d656k\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.092007 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d656k\" (UniqueName: \"kubernetes.io/projected/3b60b6a4-d67c-4450-b5f3-58d3124be789-kube-api-access-d656k\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.092443 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-utilities\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:13 crc 
kubenswrapper[4943]: I1129 06:36:13.092576 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-catalog-content\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.093374 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-catalog-content\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.093483 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-utilities\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.122649 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d656k\" (UniqueName: \"kubernetes.io/projected/3b60b6a4-d67c-4450-b5f3-58d3124be789-kube-api-access-d656k\") pod \"redhat-marketplace-6rz7x\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.180593 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.183681 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.208839 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.210001 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.216794 4943 patch_prober.go:28] interesting pod/downloads-7954f5f757-v52hz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.216855 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-v52hz" podUID="dab93fb0-80fd-4bb3-aa09-e95434e7354b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.216951 4943 patch_prober.go:28] interesting pod/downloads-7954f5f757-v52hz container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.217019 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-v52hz" podUID="dab93fb0-80fd-4bb3-aa09-e95434e7354b" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.229337 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-q2zm5"] Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.231173 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.240751 4943 patch_prober.go:28] interesting pod/console-f9d7485db-jxf26 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.240812 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-jxf26" podUID="1dec43e3-8363-43ca-a96b-6127086f75db" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.243384 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2zm5"] Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.296473 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.296546 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.296607 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.300188 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.300696 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.301080 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.356105 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.358061 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.358937 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.359326 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.359551 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.370370 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-hkg6f" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.372184 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.397443 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.397503 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-utilities\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.397520 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skchv\" (UniqueName: \"kubernetes.io/projected/86599fb0-5276-47e4-af0d-d7e5c3520d5e-kube-api-access-skchv\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.397669 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-catalog-content\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.403615 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " 
pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.403704 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzqp2" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.432669 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rz7x"] Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.499266 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-catalog-content\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.499459 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-utilities\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.499482 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skchv\" (UniqueName: \"kubernetes.io/projected/86599fb0-5276-47e4-af0d-d7e5c3520d5e-kube-api-access-skchv\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.500992 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-catalog-content\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.501447 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-utilities\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.544052 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skchv\" (UniqueName: \"kubernetes.io/projected/86599fb0-5276-47e4-af0d-d7e5c3520d5e-kube-api-access-skchv\") pod \"redhat-marketplace-q2zm5\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.565049 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.649359 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.700932 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rz7x" event={"ID":"3b60b6a4-d67c-4450-b5f3-58d3124be789","Type":"ContainerStarted","Data":"6e9834ed8098217de9ecb1f5464249c9479fe217b85afd6d0ccf377df920d828"} Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.706697 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79","Type":"ContainerStarted","Data":"b9a175f32f00c2c2740315737f8e5ee857f1eba27af700258f48424051f044bb"} Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.714686 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2sdx6" Nov 29 06:36:13 crc kubenswrapper[4943]: W1129 06:36:13.862802 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-5acf72eb5877d2dd290bac6af754d2e9c665b61d8778b20d0370baf5dc914fb8 WatchSource:0}: Error finding container 5acf72eb5877d2dd290bac6af754d2e9c665b61d8778b20d0370baf5dc914fb8: Status 404 returned error can't find the container with id 5acf72eb5877d2dd290bac6af754d2e9c665b61d8778b20d0370baf5dc914fb8 Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.962635 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.984281 4943 patch_prober.go:28] interesting pod/router-default-5444994796-h4mrx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 06:36:13 crc kubenswrapper[4943]: [-]has-synced failed: reason withheld Nov 29 06:36:13 crc kubenswrapper[4943]: [+]process-running ok Nov 29 06:36:13 crc kubenswrapper[4943]: healthz check failed Nov 29 06:36:13 crc kubenswrapper[4943]: I1129 06:36:13.984375 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h4mrx" podUID="09b45167-2db2-48c1-8776-4edc3ecb9ffb" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.003938 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.031391 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p7m24"] Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.032392 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.041835 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.065462 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p7m24"] Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.230479 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s4nm\" (UniqueName: \"kubernetes.io/projected/41b233a1-b997-493b-a71c-e4b9bd816479-kube-api-access-5s4nm\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.230917 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-utilities\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.230960 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-catalog-content\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.333256 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-catalog-content\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.333345 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s4nm\" (UniqueName: \"kubernetes.io/projected/41b233a1-b997-493b-a71c-e4b9bd816479-kube-api-access-5s4nm\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.333406 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-utilities\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.334487 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-utilities\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.334806 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-catalog-content\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " 
pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.370644 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2zm5"] Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.395787 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s4nm\" (UniqueName: \"kubernetes.io/projected/41b233a1-b997-493b-a71c-e4b9bd816479-kube-api-access-5s4nm\") pod \"redhat-operators-p7m24\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.432144 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dt5vl"] Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.433691 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.464982 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dt5vl"] Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.537265 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-utilities\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.537336 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-catalog-content\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.537376 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r95nt\" (UniqueName: \"kubernetes.io/projected/c48a9e5e-033e-465e-a3e9-c474245e7b0d-kube-api-access-r95nt\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.638280 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-utilities\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.638382 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-catalog-content\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.638413 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r95nt\" (UniqueName: \"kubernetes.io/projected/c48a9e5e-033e-465e-a3e9-c474245e7b0d-kube-api-access-r95nt\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 
06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.639397 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-utilities\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.639668 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-catalog-content\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.666710 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r95nt\" (UniqueName: \"kubernetes.io/projected/c48a9e5e-033e-465e-a3e9-c474245e7b0d-kube-api-access-r95nt\") pod \"redhat-operators-dt5vl\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.678890 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.746478 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"0662903e610c0350bb98914c82b4923fae33123e3f11ea74e3e5b126a488f6e2"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.771190 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.782158 4943 generic.go:334] "Generic (PLEG): container finished" podID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerID="0788f66a0780680bbe47bd10555adfb0158c79d3afad512631ba4ce4b3346bb1" exitCode=0 Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.784041 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rz7x" event={"ID":"3b60b6a4-d67c-4450-b5f3-58d3124be789","Type":"ContainerDied","Data":"0788f66a0780680bbe47bd10555adfb0158c79d3afad512631ba4ce4b3346bb1"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.805776 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f6d03800efc37506cb872c90cc061bef33fce4563e97d936feb7cc0235c98703"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.805827 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"5acf72eb5877d2dd290bac6af754d2e9c665b61d8778b20d0370baf5dc914fb8"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.810457 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c6a169aeaec65b38d919265e058b50deaec84dca54e763f675384fd37ac4b947"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.810489 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"152e78ee29bbad8a5f22a4630c4ea0621c891fb28817069823ccb619c38c2f1f"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.841963 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79","Type":"ContainerStarted","Data":"06434e8787f582401d887686e8929386a388465ff1388e707d42a5454349eea8"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.857089 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2zm5" event={"ID":"86599fb0-5276-47e4-af0d-d7e5c3520d5e","Type":"ContainerStarted","Data":"49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.857195 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2zm5" event={"ID":"86599fb0-5276-47e4-af0d-d7e5c3520d5e","Type":"ContainerStarted","Data":"b44d9a99199847c84d4fa54908c966312a156850266450d84f0e08fd10d99d41"} Nov 29 06:36:14 crc kubenswrapper[4943]: I1129 06:36:14.976482 4943 patch_prober.go:28] interesting pod/router-default-5444994796-h4mrx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 06:36:14 crc kubenswrapper[4943]: [-]has-synced failed: reason withheld Nov 29 06:36:14 crc kubenswrapper[4943]: [+]process-running ok Nov 29 06:36:14 crc kubenswrapper[4943]: healthz check failed Nov 29 06:36:14 crc 
kubenswrapper[4943]: I1129 06:36:14.976546 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h4mrx" podUID="09b45167-2db2-48c1-8776-4edc3ecb9ffb" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.269719 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.269702621 podStartE2EDuration="3.269702621s" podCreationTimestamp="2025-11-29 06:36:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:14.934096221 +0000 UTC m=+149.864184994" watchObservedRunningTime="2025-11-29 06:36:15.269702621 +0000 UTC m=+150.199791374" Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.270119 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dt5vl"] Nov 29 06:36:15 crc kubenswrapper[4943]: W1129 06:36:15.302741 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc48a9e5e_033e_465e_a3e9_c474245e7b0d.slice/crio-88c266bd6222f92c9d48f9cad2fba563d8dce978ced491dbb1a8051d2ca6bd0b WatchSource:0}: Error finding container 88c266bd6222f92c9d48f9cad2fba563d8dce978ced491dbb1a8051d2ca6bd0b: Status 404 returned error can't find the container with id 88c266bd6222f92c9d48f9cad2fba563d8dce978ced491dbb1a8051d2ca6bd0b Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.451149 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p7m24"] Nov 29 06:36:15 crc kubenswrapper[4943]: W1129 06:36:15.492065 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41b233a1_b997_493b_a71c_e4b9bd816479.slice/crio-edec8a592fc73daa7eb1dacc550d1174546b0166db82f3fe054621920330b926 WatchSource:0}: Error finding container edec8a592fc73daa7eb1dacc550d1174546b0166db82f3fe054621920330b926: Status 404 returned error can't find the container with id edec8a592fc73daa7eb1dacc550d1174546b0166db82f3fe054621920330b926 Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.868055 4943 generic.go:334] "Generic (PLEG): container finished" podID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerID="64e7ebf323b635d729503ce57eeb24a04b2e5d0831112589a7a5d439f27c2b01" exitCode=0 Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.868233 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dt5vl" event={"ID":"c48a9e5e-033e-465e-a3e9-c474245e7b0d","Type":"ContainerDied","Data":"64e7ebf323b635d729503ce57eeb24a04b2e5d0831112589a7a5d439f27c2b01"} Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.868455 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dt5vl" event={"ID":"c48a9e5e-033e-465e-a3e9-c474245e7b0d","Type":"ContainerStarted","Data":"88c266bd6222f92c9d48f9cad2fba563d8dce978ced491dbb1a8051d2ca6bd0b"} Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.883192 4943 generic.go:334] "Generic (PLEG): container finished" podID="a7dfc819-9091-4d8f-93f6-bb79e4a7fa79" containerID="06434e8787f582401d887686e8929386a388465ff1388e707d42a5454349eea8" exitCode=0 Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.883277 4943 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79","Type":"ContainerDied","Data":"06434e8787f582401d887686e8929386a388465ff1388e707d42a5454349eea8"} Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.918586 4943 generic.go:334] "Generic (PLEG): container finished" podID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerID="49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e" exitCode=0 Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.918686 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2zm5" event={"ID":"86599fb0-5276-47e4-af0d-d7e5c3520d5e","Type":"ContainerDied","Data":"49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e"} Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.959944 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"a9e2435a904bbe83dabeadc46bb6f92b09d7b5d6a2df79784723402a893a48d6"} Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.961540 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.966931 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.968115 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" event={"ID":"4af90184-bb3b-455d-a9dc-9e120c08b3c7","Type":"ContainerDied","Data":"540021f999e4c8d4aadddf42db9b9abca345e7a35b470e82498ca1339e2b2120"} Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.968125 4943 generic.go:334] "Generic (PLEG): container finished" podID="4af90184-bb3b-455d-a9dc-9e120c08b3c7" containerID="540021f999e4c8d4aadddf42db9b9abca345e7a35b470e82498ca1339e2b2120" exitCode=0 Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.970294 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-h4mrx" Nov 29 06:36:15 crc kubenswrapper[4943]: I1129 06:36:15.973939 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7m24" event={"ID":"41b233a1-b997-493b-a71c-e4b9bd816479","Type":"ContainerStarted","Data":"edec8a592fc73daa7eb1dacc550d1174546b0166db82f3fe054621920330b926"} Nov 29 06:36:16 crc kubenswrapper[4943]: I1129 06:36:16.080878 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:36:16 crc kubenswrapper[4943]: I1129 06:36:16.985595 4943 generic.go:334] "Generic (PLEG): container finished" podID="41b233a1-b997-493b-a71c-e4b9bd816479" containerID="f1e37a834ee922656d1544911480f7e9903a9c667520f5eca1abe6d550e7ae24" exitCode=0 Nov 29 06:36:16 crc kubenswrapper[4943]: I1129 06:36:16.985880 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7m24" event={"ID":"41b233a1-b997-493b-a71c-e4b9bd816479","Type":"ContainerDied","Data":"f1e37a834ee922656d1544911480f7e9903a9c667520f5eca1abe6d550e7ae24"} Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.316613 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.390972 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.440872 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8x2gg\" (UniqueName: \"kubernetes.io/projected/4af90184-bb3b-455d-a9dc-9e120c08b3c7-kube-api-access-8x2gg\") pod \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.440950 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kubelet-dir\") pod \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\" (UID: \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\") " Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.440989 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kube-api-access\") pod \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\" (UID: \"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79\") " Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.441013 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4af90184-bb3b-455d-a9dc-9e120c08b3c7-secret-volume\") pod \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.441067 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume\") pod \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\" (UID: \"4af90184-bb3b-455d-a9dc-9e120c08b3c7\") " Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.441333 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a7dfc819-9091-4d8f-93f6-bb79e4a7fa79" (UID: "a7dfc819-9091-4d8f-93f6-bb79e4a7fa79"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.442111 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume" (OuterVolumeSpecName: "config-volume") pod "4af90184-bb3b-455d-a9dc-9e120c08b3c7" (UID: "4af90184-bb3b-455d-a9dc-9e120c08b3c7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.449951 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a7dfc819-9091-4d8f-93f6-bb79e4a7fa79" (UID: "a7dfc819-9091-4d8f-93f6-bb79e4a7fa79"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.453704 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4af90184-bb3b-455d-a9dc-9e120c08b3c7-kube-api-access-8x2gg" (OuterVolumeSpecName: "kube-api-access-8x2gg") pod "4af90184-bb3b-455d-a9dc-9e120c08b3c7" (UID: "4af90184-bb3b-455d-a9dc-9e120c08b3c7"). InnerVolumeSpecName "kube-api-access-8x2gg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.465133 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4af90184-bb3b-455d-a9dc-9e120c08b3c7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4af90184-bb3b-455d-a9dc-9e120c08b3c7" (UID: "4af90184-bb3b-455d-a9dc-9e120c08b3c7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.544100 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8x2gg\" (UniqueName: \"kubernetes.io/projected/4af90184-bb3b-455d-a9dc-9e120c08b3c7-kube-api-access-8x2gg\") on node \"crc\" DevicePath \"\""
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.544137 4943 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.544149 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7dfc819-9091-4d8f-93f6-bb79e4a7fa79-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.544160 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4af90184-bb3b-455d-a9dc-9e120c08b3c7-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.544172 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4af90184-bb3b-455d-a9dc-9e120c08b3c7-config-volume\") on node \"crc\" DevicePath \"\""
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.563652 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 29 06:36:17 crc kubenswrapper[4943]: E1129 06:36:17.563889 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7dfc819-9091-4d8f-93f6-bb79e4a7fa79" containerName="pruner"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.563900 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7dfc819-9091-4d8f-93f6-bb79e4a7fa79" containerName="pruner"
Nov 29 06:36:17 crc kubenswrapper[4943]: E1129 06:36:17.563917 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af90184-bb3b-455d-a9dc-9e120c08b3c7" containerName="collect-profiles"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.563923 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af90184-bb3b-455d-a9dc-9e120c08b3c7" containerName="collect-profiles"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.564016 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7dfc819-9091-4d8f-93f6-bb79e4a7fa79" containerName="pruner"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.564026 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="4af90184-bb3b-455d-a9dc-9e120c08b3c7" containerName="collect-profiles"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.564365 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.566429 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.567003 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.573157 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.746791 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.747153 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.847781 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.847832 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.848185 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.868794 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:17 crc kubenswrapper[4943]: I1129 06:36:17.887112 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:18 crc kubenswrapper[4943]: I1129 06:36:18.008239 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj" event={"ID":"4af90184-bb3b-455d-a9dc-9e120c08b3c7","Type":"ContainerDied","Data":"230b7f76898a954460c07deaa1376fb4f586eef6d23807967a1aed8863d712cf"}
Nov 29 06:36:18 crc kubenswrapper[4943]: I1129 06:36:18.008278 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="230b7f76898a954460c07deaa1376fb4f586eef6d23807967a1aed8863d712cf"
Nov 29 06:36:18 crc kubenswrapper[4943]: I1129 06:36:18.008327 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"
Nov 29 06:36:18 crc kubenswrapper[4943]: I1129 06:36:18.027093 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a7dfc819-9091-4d8f-93f6-bb79e4a7fa79","Type":"ContainerDied","Data":"b9a175f32f00c2c2740315737f8e5ee857f1eba27af700258f48424051f044bb"}
Nov 29 06:36:18 crc kubenswrapper[4943]: I1129 06:36:18.027129 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9a175f32f00c2c2740315737f8e5ee857f1eba27af700258f48424051f044bb"
Nov 29 06:36:18 crc kubenswrapper[4943]: I1129 06:36:18.027242 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 29 06:36:18 crc kubenswrapper[4943]: I1129 06:36:18.222197 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 29 06:36:18 crc kubenswrapper[4943]: W1129 06:36:18.249114 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podb880f23f_a4a4_470f_bdbc_00c63b879ee7.slice/crio-75b9f0dce17264d4c487233155bb8162b19f5404785524e2d35d04ddc55de1cb WatchSource:0}: Error finding container 75b9f0dce17264d4c487233155bb8162b19f5404785524e2d35d04ddc55de1cb: Status 404 returned error can't find the container with id 75b9f0dce17264d4c487233155bb8162b19f5404785524e2d35d04ddc55de1cb
Nov 29 06:36:18 crc kubenswrapper[4943]: I1129 06:36:18.460229 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-4txpk"
Nov 29 06:36:19 crc kubenswrapper[4943]: I1129 06:36:19.035291 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b880f23f-a4a4-470f-bdbc-00c63b879ee7","Type":"ContainerStarted","Data":"75b9f0dce17264d4c487233155bb8162b19f5404785524e2d35d04ddc55de1cb"}
Nov 29 06:36:21 crc kubenswrapper[4943]: I1129 06:36:21.053986 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b880f23f-a4a4-470f-bdbc-00c63b879ee7","Type":"ContainerStarted","Data":"c33158ece1ad54dd2ed3d71693fa5cbe161637925272b7d593e948b68f4adbed"}
Nov 29 06:36:22 crc kubenswrapper[4943]: I1129 06:36:22.073363 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=5.073343025 podStartE2EDuration="5.073343025s" podCreationTimestamp="2025-11-29 06:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:36:22.072422448 +0000 UTC m=+157.002511221" watchObservedRunningTime="2025-11-29 06:36:22.073343025 +0000 UTC m=+157.003431778"
Nov 29 06:36:23 crc kubenswrapper[4943]: I1129 06:36:23.067046 4943 generic.go:334] "Generic (PLEG): container finished" podID="b880f23f-a4a4-470f-bdbc-00c63b879ee7" containerID="c33158ece1ad54dd2ed3d71693fa5cbe161637925272b7d593e948b68f4adbed" exitCode=0
Nov 29 06:36:23 crc kubenswrapper[4943]: I1129 06:36:23.067107 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b880f23f-a4a4-470f-bdbc-00c63b879ee7","Type":"ContainerDied","Data":"c33158ece1ad54dd2ed3d71693fa5cbe161637925272b7d593e948b68f4adbed"}
Nov 29 06:36:23 crc kubenswrapper[4943]: I1129 06:36:23.215648 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:23 crc kubenswrapper[4943]: I1129 06:36:23.219112 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-jxf26"
Nov 29 06:36:23 crc kubenswrapper[4943]: I1129 06:36:23.232051 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-v52hz"
Nov 29 06:36:24 crc kubenswrapper[4943]: I1129 06:36:24.354054 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:24 crc kubenswrapper[4943]: I1129 06:36:24.549054 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kubelet-dir\") pod \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\" (UID: \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\") "
Nov 29 06:36:24 crc kubenswrapper[4943]: I1129 06:36:24.549145 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kube-api-access\") pod \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\" (UID: \"b880f23f-a4a4-470f-bdbc-00c63b879ee7\") "
Nov 29 06:36:24 crc kubenswrapper[4943]: I1129 06:36:24.549185 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b880f23f-a4a4-470f-bdbc-00c63b879ee7" (UID: "b880f23f-a4a4-470f-bdbc-00c63b879ee7"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 06:36:24 crc kubenswrapper[4943]: I1129 06:36:24.549481 4943 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 29 06:36:24 crc kubenswrapper[4943]: I1129 06:36:24.555790 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b880f23f-a4a4-470f-bdbc-00c63b879ee7" (UID: "b880f23f-a4a4-470f-bdbc-00c63b879ee7"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:36:24 crc kubenswrapper[4943]: I1129 06:36:24.650921 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b880f23f-a4a4-470f-bdbc-00c63b879ee7-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 06:36:25 crc kubenswrapper[4943]: I1129 06:36:25.078186 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b880f23f-a4a4-470f-bdbc-00c63b879ee7","Type":"ContainerDied","Data":"75b9f0dce17264d4c487233155bb8162b19f5404785524e2d35d04ddc55de1cb"}
Nov 29 06:36:25 crc kubenswrapper[4943]: I1129 06:36:25.078514 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75b9f0dce17264d4c487233155bb8162b19f5404785524e2d35d04ddc55de1cb"
Nov 29 06:36:25 crc kubenswrapper[4943]: I1129 06:36:25.078233 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 06:36:27 crc kubenswrapper[4943]: I1129 06:36:27.686896 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:36:27 crc kubenswrapper[4943]: I1129 06:36:27.692079 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7b0785d-0c62-4fef-83aa-a9d32e9d388b-metrics-certs\") pod \"network-metrics-daemon-4wgtt\" (UID: \"b7b0785d-0c62-4fef-83aa-a9d32e9d388b\") " pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:36:27 crc kubenswrapper[4943]: I1129 06:36:27.778061 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4wgtt"
Nov 29 06:36:31 crc kubenswrapper[4943]: I1129 06:36:31.944574 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:36:32 crc kubenswrapper[4943]: I1129 06:36:32.613857 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:36:32 crc kubenswrapper[4943]: I1129 06:36:32.613963 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:36:43 crc kubenswrapper[4943]: I1129 06:36:43.987287 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6qnvs"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.157165 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 29 06:36:51 crc kubenswrapper[4943]: E1129 06:36:51.157957 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b880f23f-a4a4-470f-bdbc-00c63b879ee7" containerName="pruner"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.157970 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b880f23f-a4a4-470f-bdbc-00c63b879ee7" containerName="pruner"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.158070 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b880f23f-a4a4-470f-bdbc-00c63b879ee7" containerName="pruner"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.158464 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.161951 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.161999 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.163923 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.278152 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/af440fe2-c5f0-455c-b063-05e0f710814b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"af440fe2-c5f0-455c-b063-05e0f710814b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.278265 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/af440fe2-c5f0-455c-b063-05e0f710814b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"af440fe2-c5f0-455c-b063-05e0f710814b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.379943 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/af440fe2-c5f0-455c-b063-05e0f710814b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"af440fe2-c5f0-455c-b063-05e0f710814b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.380153 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/af440fe2-c5f0-455c-b063-05e0f710814b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"af440fe2-c5f0-455c-b063-05e0f710814b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.380359 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/af440fe2-c5f0-455c-b063-05e0f710814b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"af440fe2-c5f0-455c-b063-05e0f710814b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.413805 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/af440fe2-c5f0-455c-b063-05e0f710814b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"af440fe2-c5f0-455c-b063-05e0f710814b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:36:51 crc kubenswrapper[4943]: I1129 06:36:51.479799 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.363697 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.365824 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.368313 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.531339 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-var-lock\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.531496 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7431070a-052c-42c5-a80e-f5d6853d7cf2-kube-api-access\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.531532 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.632345 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7431070a-052c-42c5-a80e-f5d6853d7cf2-kube-api-access\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.632416 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.632461 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-var-lock\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.632525 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.632551 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-var-lock\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.647611 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7431070a-052c-42c5-a80e-f5d6853d7cf2-kube-api-access\") pod \"installer-9-crc\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:55 crc kubenswrapper[4943]: I1129 06:36:55.687674 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 06:36:56 crc kubenswrapper[4943]: I1129 06:36:56.655024 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 06:37:02 crc kubenswrapper[4943]: I1129 06:37:02.613848 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:37:02 crc kubenswrapper[4943]: I1129 06:37:02.614206 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:37:11 crc kubenswrapper[4943]: E1129 06:37:11.631818 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 29 06:37:11 crc kubenswrapper[4943]: E1129 06:37:11.632597 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d656k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-6rz7x_openshift-marketplace(3b60b6a4-d67c-4450-b5f3-58d3124be789): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 06:37:11 crc kubenswrapper[4943]: E1129 06:37:11.634736 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-6rz7x" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789"
Nov 29 06:37:11 crc kubenswrapper[4943]: E1129 06:37:11.818047 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475\": context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 29 06:37:11 crc kubenswrapper[4943]: E1129 06:37:11.818557 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r95nt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dt5vl_openshift-marketplace(c48a9e5e-033e-465e-a3e9-c474245e7b0d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475\": context canceled" logger="UnhandledError"
Nov 29 06:37:11 crc kubenswrapper[4943]: E1129 06:37:11.819816 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475: Get \\\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475\\\": context canceled\"" pod="openshift-marketplace/redhat-operators-dt5vl" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.487911 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-dt5vl" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.488212 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-6rz7x" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.546272 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage3006211711/3\": happened during read: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.546442 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5s4nm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-p7m24_openshift-marketplace(41b233a1-b997-493b-a71c-e4b9bd816479): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage3006211711/3\": happened during read: context canceled" logger="UnhandledError"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.547620 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \\\"/var/tmp/container_images_storage3006211711/3\\\": happened during read: context canceled\"" pod="openshift-marketplace/redhat-operators-p7m24" podUID="41b233a1-b997-493b-a71c-e4b9bd816479"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.557258 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.557459 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-frcnn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-sr58d_openshift-marketplace(8911d577-ec61-4e6d-96e1-c51ee6b5477e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.559527 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-sr58d" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.578252 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.578439 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x8vkp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-knbjh_openshift-marketplace(6220f73c-eb92-4998-b58c-3d6faae45361): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 06:37:13 crc kubenswrapper[4943]: E1129 06:37:13.580026 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-knbjh" podUID="6220f73c-eb92-4998-b58c-3d6faae45361"
Nov 29 06:37:14 crc kubenswrapper[4943]: E1129 06:37:14.936776 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-knbjh" podUID="6220f73c-eb92-4998-b58c-3d6faae45361"
Nov 29 06:37:14 crc kubenswrapper[4943]: E1129 06:37:14.937028 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-p7m24" podUID="41b233a1-b997-493b-a71c-e4b9bd816479"
Nov 29 06:37:14 crc kubenswrapper[4943]: E1129 06:37:14.938647 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-sr58d" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.049511 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.050057 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2fs7q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-mxcdj_openshift-marketplace(9a026553-a5c9-47ea-bce1-4aa730f8f516): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.051653 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-mxcdj" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.072759 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.072879 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-skchv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-q2zm5_openshift-marketplace(86599fb0-5276-47e4-af0d-d7e5c3520d5e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.074025 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-q2zm5" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.089658 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.089836 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bt9f5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-vxdd2_openshift-marketplace(04357fd3-79d7-4a5c-b4ba-01e1ff2face4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.091159 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vxdd2" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4"
Nov 29 06:37:15 crc kubenswrapper[4943]: I1129 06:37:15.166690 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4wgtt"]
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.351441 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-mxcdj" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.351871 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-q2zm5" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e"
Nov 29 06:37:15 crc kubenswrapper[4943]: E1129 06:37:15.352112 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-vxdd2" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4"
Nov 29 06:37:15 crc kubenswrapper[4943]: I1129 06:37:15.355976 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" event={"ID":"b7b0785d-0c62-4fef-83aa-a9d32e9d388b","Type":"ContainerStarted","Data":"88fe1a0bf894f07b76d8fd272c617116f09b4e1399d182e4562ff34d1d10f7a4"}
Nov 29 06:37:15 crc kubenswrapper[4943]: I1129 06:37:15.421191 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 29 06:37:15 crc kubenswrapper[4943]: I1129 06:37:15.444992 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.357968 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" event={"ID":"b7b0785d-0c62-4fef-83aa-a9d32e9d388b","Type":"ContainerStarted","Data":"67fbf195c0ddf1622d8228df94878f9e59dfbbc9b996ec34ac26642dd3e7e87e"}
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.359180 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4wgtt" event={"ID":"b7b0785d-0c62-4fef-83aa-a9d32e9d388b","Type":"ContainerStarted","Data":"860f3217bcd6a6d21a6309e0dff2099a97e790ba52c3d2c07d3f862fbea11eb5"}
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.360005 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7431070a-052c-42c5-a80e-f5d6853d7cf2","Type":"ContainerStarted","Data":"beeb610892328381484815f9801c68c057c0d701c5b740910fe94a58c351611d"}
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.360031 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7431070a-052c-42c5-a80e-f5d6853d7cf2","Type":"ContainerStarted","Data":"f420b3e53173a3a57d70f04d692a56cabc7d0d27ff308ff2c683764a45b54af8"}
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.364151 4943 generic.go:334] "Generic (PLEG): container finished" podID="af440fe2-c5f0-455c-b063-05e0f710814b" containerID="eb803f23100b21152b1bee22b8bcbcfd0bcb2e1733320041f8f9a20dc59c4d7c" exitCode=0
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.364231 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"af440fe2-c5f0-455c-b063-05e0f710814b","Type":"ContainerDied","Data":"eb803f23100b21152b1bee22b8bcbcfd0bcb2e1733320041f8f9a20dc59c4d7c"}
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.364265 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"af440fe2-c5f0-455c-b063-05e0f710814b","Type":"ContainerStarted","Data":"a70f9158f3b6cdf099f95d1f093c38656aa6bb3927402f7e857586c5911eff89"}
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.377223 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-4wgtt" podStartSLOduration=191.377204433 podStartE2EDuration="3m11.377204433s" podCreationTimestamp="2025-11-29 06:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:37:16.371554951 +0000 UTC m=+211.301643724" watchObservedRunningTime="2025-11-29 06:37:16.377204433 +0000 UTC m=+211.307293186"
Nov 29 06:37:16 crc kubenswrapper[4943]: I1129 06:37:16.409748 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=21.409729158 podStartE2EDuration="21.409729158s" podCreationTimestamp="2025-11-29 06:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:37:16.406363292 +0000 UTC m=+211.336452055" watchObservedRunningTime="2025-11-29 06:37:16.409729158 +0000 UTC m=+211.339817911"
Nov 29 06:37:17 crc kubenswrapper[4943]: I1129 06:37:17.578497 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:37:17 crc kubenswrapper[4943]: I1129 06:37:17.652607 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/af440fe2-c5f0-455c-b063-05e0f710814b-kubelet-dir\") pod \"af440fe2-c5f0-455c-b063-05e0f710814b\" (UID: \"af440fe2-c5f0-455c-b063-05e0f710814b\") "
Nov 29 06:37:17 crc kubenswrapper[4943]: I1129 06:37:17.653118 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/af440fe2-c5f0-455c-b063-05e0f710814b-kube-api-access\") pod \"af440fe2-c5f0-455c-b063-05e0f710814b\" (UID: \"af440fe2-c5f0-455c-b063-05e0f710814b\") "
Nov 29 06:37:17 crc kubenswrapper[4943]: I1129 06:37:17.652878 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/af440fe2-c5f0-455c-b063-05e0f710814b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "af440fe2-c5f0-455c-b063-05e0f710814b" (UID: "af440fe2-c5f0-455c-b063-05e0f710814b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 06:37:17 crc kubenswrapper[4943]: I1129 06:37:17.653839 4943 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/af440fe2-c5f0-455c-b063-05e0f710814b-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 29 06:37:17 crc kubenswrapper[4943]: I1129 06:37:17.668690 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af440fe2-c5f0-455c-b063-05e0f710814b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "af440fe2-c5f0-455c-b063-05e0f710814b" (UID: "af440fe2-c5f0-455c-b063-05e0f710814b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:37:17 crc kubenswrapper[4943]: I1129 06:37:17.755163 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/af440fe2-c5f0-455c-b063-05e0f710814b-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 06:37:18 crc kubenswrapper[4943]: I1129 06:37:18.386902 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"af440fe2-c5f0-455c-b063-05e0f710814b","Type":"ContainerDied","Data":"a70f9158f3b6cdf099f95d1f093c38656aa6bb3927402f7e857586c5911eff89"}
Nov 29 06:37:18 crc kubenswrapper[4943]: I1129 06:37:18.386994 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 06:37:18 crc kubenswrapper[4943]: I1129 06:37:18.388886 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a70f9158f3b6cdf099f95d1f093c38656aa6bb3927402f7e857586c5911eff89"
Nov 29 06:37:22 crc kubenswrapper[4943]: I1129 06:37:22.336935 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-sdcdn"]
Nov 29 06:37:27 crc kubenswrapper[4943]: I1129 06:37:27.445808 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mxcdj" event={"ID":"9a026553-a5c9-47ea-bce1-4aa730f8f516","Type":"ContainerStarted","Data":"11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55"}
Nov 29 06:37:27 crc kubenswrapper[4943]: I1129 06:37:27.449170 4943 generic.go:334] "Generic (PLEG): container finished" podID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerID="c3e2becd245b7f44a990b46cc9b917ac1c3c7d4b59ff8e6cc1d09f5609a60d4c" exitCode=0
Nov 29 06:37:27 crc kubenswrapper[4943]: I1129 06:37:27.449204 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rz7x" event={"ID":"3b60b6a4-d67c-4450-b5f3-58d3124be789","Type":"ContainerDied","Data":"c3e2becd245b7f44a990b46cc9b917ac1c3c7d4b59ff8e6cc1d09f5609a60d4c"}
Nov 29 06:37:28 crc kubenswrapper[4943]: I1129 06:37:28.459406 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rz7x" event={"ID":"3b60b6a4-d67c-4450-b5f3-58d3124be789","Type":"ContainerStarted","Data":"c0d7bf1ab44dd2e2a75be9dbf28e9eb2a0aec7d054f7f0a2ee8584d17cfb6778"}
Nov 29 06:37:28 crc kubenswrapper[4943]: I1129 06:37:28.471638 4943 generic.go:334] "Generic (PLEG): container finished" podID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerID="11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55" exitCode=0
Nov 29 06:37:28 crc kubenswrapper[4943]: I1129 06:37:28.471682 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mxcdj" event={"ID":"9a026553-a5c9-47ea-bce1-4aa730f8f516","Type":"ContainerDied","Data":"11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55"}
Nov 29 06:37:28 crc kubenswrapper[4943]: I1129 06:37:28.477970 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6rz7x" podStartSLOduration=3.411614642 podStartE2EDuration="1m16.47795509s" podCreationTimestamp="2025-11-29 06:36:12 +0000 UTC" firstStartedPulling="2025-11-29 06:36:14.852736275 +0000 UTC m=+149.782825028" lastFinishedPulling="2025-11-29 06:37:27.919076723 +0000 UTC m=+222.849165476" observedRunningTime="2025-11-29 06:37:28.475534411 +0000 UTC m=+223.405623194" watchObservedRunningTime="2025-11-29 06:37:28.47795509 +0000 UTC m=+223.408043843"
Nov 29 06:37:29 crc kubenswrapper[4943]: I1129 06:37:29.478048 4943 generic.go:334] "Generic (PLEG): container finished" podID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerID="f8b1e8d1e96fef28ce3c2dbfcf781cbde0570947ccc2e3de004e1deab855b82e" exitCode=0
Nov 29 06:37:29 crc kubenswrapper[4943]: I1129 06:37:29.478128 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sr58d" event={"ID":"8911d577-ec61-4e6d-96e1-c51ee6b5477e","Type":"ContainerDied","Data":"f8b1e8d1e96fef28ce3c2dbfcf781cbde0570947ccc2e3de004e1deab855b82e"}
Nov 29 06:37:29 crc kubenswrapper[4943]: I1129 06:37:29.479962 4943 generic.go:334] "Generic (PLEG): container finished" podID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerID="2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a" exitCode=0
Nov 29 06:37:29 crc kubenswrapper[4943]: I1129 06:37:29.480013 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2zm5" event={"ID":"86599fb0-5276-47e4-af0d-d7e5c3520d5e","Type":"ContainerDied","Data":"2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a"}
Nov 29 06:37:29 crc kubenswrapper[4943]: I1129 06:37:29.486357 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mxcdj" event={"ID":"9a026553-a5c9-47ea-bce1-4aa730f8f516","Type":"ContainerStarted","Data":"471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273"}
Nov 29 06:37:29 crc kubenswrapper[4943]: I1129 06:37:29.537153 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mxcdj" podStartSLOduration=2.257183002 podStartE2EDuration="1m18.537137211s" podCreationTimestamp="2025-11-29 06:36:11 +0000 UTC" firstStartedPulling="2025-11-29 06:36:12.619149679 +0000 UTC m=+147.549238432" lastFinishedPulling="2025-11-29 06:37:28.899103888 +0000 UTC m=+223.829192641" observedRunningTime="2025-11-29 06:37:29.516465177 +0000 UTC m=+224.446553940" watchObservedRunningTime="2025-11-29 06:37:29.537137211 +0000 UTC m=+224.467225954"
Nov 29 06:37:30 crc kubenswrapper[4943]: I1129 06:37:30.495396 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sr58d" event={"ID":"8911d577-ec61-4e6d-96e1-c51ee6b5477e","Type":"ContainerStarted","Data":"46084ff836c7e5f257703b6a90ec08b858d505366f7ec5e5314875baa7df20e7"}
Nov 29 06:37:30 crc kubenswrapper[4943]: I1129 06:37:30.498114 4943 generic.go:334] "Generic (PLEG): container finished" podID="6220f73c-eb92-4998-b58c-3d6faae45361" containerID="0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5" exitCode=0
Nov 29 06:37:30 crc kubenswrapper[4943]: I1129 06:37:30.498230 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-knbjh" event={"ID":"6220f73c-eb92-4998-b58c-3d6faae45361","Type":"ContainerDied","Data":"0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5"}
Nov 29 06:37:30 crc kubenswrapper[4943]: I1129 06:37:30.502001 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2zm5" event={"ID":"86599fb0-5276-47e4-af0d-d7e5c3520d5e","Type":"ContainerStarted","Data":"2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b"}
Nov 29 06:37:30 crc kubenswrapper[4943]: I1129 06:37:30.517798 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sr58d" podStartSLOduration=2.113405671 podStartE2EDuration="1m19.517783494s" podCreationTimestamp="2025-11-29 06:36:11 +0000 UTC" firstStartedPulling="2025-11-29 06:36:12.607389864 +0000 UTC m=+147.537478617" lastFinishedPulling="2025-11-29 06:37:30.011767687 +0000 UTC m=+224.941856440" observedRunningTime="2025-11-29 06:37:30.517040533 +0000 UTC m=+225.447129296" watchObservedRunningTime="2025-11-29 06:37:30.517783494 +0000 UTC m=+225.447872247"
Nov 29 06:37:30 crc kubenswrapper[4943]: I1129 06:37:30.556617 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-q2zm5" podStartSLOduration=2.452924865 podStartE2EDuration="1m17.55659491s" podCreationTimestamp="2025-11-29 06:36:13 +0000 UTC" firstStartedPulling="2025-11-29 06:36:14.862072178 +0000 UTC m=+149.792160931" lastFinishedPulling="2025-11-29 06:37:29.965742223 +0000 UTC m=+224.895830976" observedRunningTime="2025-11-29 06:37:30.553390188 +0000 UTC m=+225.483478941" watchObservedRunningTime="2025-11-29 06:37:30.55659491 +0000 UTC m=+225.486683653"
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.343620 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sr58d"
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.343667 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sr58d"
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.512830 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-knbjh" event={"ID":"6220f73c-eb92-4998-b58c-3d6faae45361","Type":"ContainerStarted","Data":"00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb"}
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.534331 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-knbjh" podStartSLOduration=2.222069246 podStartE2EDuration="1m20.534314638s" podCreationTimestamp="2025-11-29 06:36:11 +0000 UTC" firstStartedPulling="2025-11-29 06:36:12.609554277 +0000 UTC m=+147.539643030" lastFinishedPulling="2025-11-29 06:37:30.921799669 +0000 UTC m=+225.851888422" observedRunningTime="2025-11-29 06:37:31.531642101 +0000 UTC m=+226.461730874" watchObservedRunningTime="2025-11-29 06:37:31.534314638 +0000 UTC m=+226.464403391"
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.558136 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mxcdj"
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.558276 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mxcdj"
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.602522 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mxcdj"
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.754551 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-knbjh"
Nov 29 06:37:31 crc kubenswrapper[4943]: I1129 06:37:31.754633 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-knbjh"
Nov 29 06:37:32 crc kubenswrapper[4943]: I1129 06:37:32.412538 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-sr58d" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="registry-server" probeResult="failure" output=<
Nov 29 06:37:32 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s
Nov 29 06:37:32 crc kubenswrapper[4943]: >
Nov 29 06:37:32 crc kubenswrapper[4943]: I1129 06:37:32.613483 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:37:32 crc kubenswrapper[4943]: I1129 06:37:32.613542 4943 prober.go:107]
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 06:37:32 crc kubenswrapper[4943]: I1129 06:37:32.613605 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:37:32 crc kubenswrapper[4943]: I1129 06:37:32.614169 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 06:37:32 crc kubenswrapper[4943]: I1129 06:37:32.614274 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f" gracePeriod=600 Nov 29 06:37:32 crc kubenswrapper[4943]: I1129 06:37:32.799036 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-knbjh" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="registry-server" probeResult="failure" output=< Nov 29 06:37:32 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 06:37:32 crc kubenswrapper[4943]: > Nov 29 06:37:33 crc kubenswrapper[4943]: I1129 06:37:33.184127 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:37:33 crc kubenswrapper[4943]: I1129 06:37:33.184957 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:37:33 crc kubenswrapper[4943]: I1129 06:37:33.230076 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:37:33 crc kubenswrapper[4943]: I1129 06:37:33.565925 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:37:33 crc kubenswrapper[4943]: I1129 06:37:33.565976 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:37:33 crc kubenswrapper[4943]: I1129 06:37:33.569641 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:37:33 crc kubenswrapper[4943]: I1129 06:37:33.604145 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:37:34 crc kubenswrapper[4943]: I1129 06:37:34.531232 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f" exitCode=0 Nov 29 06:37:34 crc kubenswrapper[4943]: I1129 06:37:34.531315 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" 
event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f"} Nov 29 06:37:41 crc kubenswrapper[4943]: I1129 06:37:41.380496 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:37:41 crc kubenswrapper[4943]: I1129 06:37:41.425887 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:37:41 crc kubenswrapper[4943]: I1129 06:37:41.597469 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:37:41 crc kubenswrapper[4943]: I1129 06:37:41.789445 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:37:41 crc kubenswrapper[4943]: I1129 06:37:41.833043 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:37:42 crc kubenswrapper[4943]: E1129 06:37:42.912734 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 29 06:37:42 crc kubenswrapper[4943]: E1129 06:37:42.913074 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r95nt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dt5vl_openshift-marketplace(c48a9e5e-033e-465e-a3e9-c474245e7b0d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 06:37:42 crc kubenswrapper[4943]: E1129 06:37:42.914359 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying 
system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-dt5vl" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" Nov 29 06:37:42 crc kubenswrapper[4943]: E1129 06:37:42.973835 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 29 06:37:42 crc kubenswrapper[4943]: E1129 06:37:42.974005 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5s4nm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-p7m24_openshift-marketplace(41b233a1-b997-493b-a71c-e4b9bd816479): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 06:37:42 crc kubenswrapper[4943]: E1129 06:37:42.975594 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-p7m24" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" Nov 29 06:37:43 crc kubenswrapper[4943]: I1129 06:37:43.282096 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-knbjh"] Nov 29 06:37:43 crc kubenswrapper[4943]: I1129 06:37:43.580039 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"6f215c40b459e9f96a43f81f6fb5469259bcf0fe4718e287a6701e572ffdeda7"} Nov 29 06:37:43 crc kubenswrapper[4943]: I1129 06:37:43.581773 4943 generic.go:334] "Generic (PLEG): container finished" podID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerID="22b93e6df2027c1ca1d288f4c3b47a243535802087c772c99659980302887b88" exitCode=0 Nov 29 
06:37:43 crc kubenswrapper[4943]: I1129 06:37:43.581879 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdd2" event={"ID":"04357fd3-79d7-4a5c-b4ba-01e1ff2face4","Type":"ContainerDied","Data":"22b93e6df2027c1ca1d288f4c3b47a243535802087c772c99659980302887b88"} Nov 29 06:37:43 crc kubenswrapper[4943]: I1129 06:37:43.582077 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-knbjh" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="registry-server" containerID="cri-o://00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb" gracePeriod=2 Nov 29 06:37:43 crc kubenswrapper[4943]: I1129 06:37:43.620998 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:37:43 crc kubenswrapper[4943]: I1129 06:37:43.884227 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mxcdj"] Nov 29 06:37:43 crc kubenswrapper[4943]: I1129 06:37:43.884614 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mxcdj" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerName="registry-server" containerID="cri-o://471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273" gracePeriod=2 Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.027772 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.130405 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-utilities\") pod \"6220f73c-eb92-4998-b58c-3d6faae45361\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.130535 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-catalog-content\") pod \"6220f73c-eb92-4998-b58c-3d6faae45361\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.130590 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8vkp\" (UniqueName: \"kubernetes.io/projected/6220f73c-eb92-4998-b58c-3d6faae45361-kube-api-access-x8vkp\") pod \"6220f73c-eb92-4998-b58c-3d6faae45361\" (UID: \"6220f73c-eb92-4998-b58c-3d6faae45361\") " Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.131589 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-utilities" (OuterVolumeSpecName: "utilities") pod "6220f73c-eb92-4998-b58c-3d6faae45361" (UID: "6220f73c-eb92-4998-b58c-3d6faae45361"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.149262 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6220f73c-eb92-4998-b58c-3d6faae45361-kube-api-access-x8vkp" (OuterVolumeSpecName: "kube-api-access-x8vkp") pod "6220f73c-eb92-4998-b58c-3d6faae45361" (UID: "6220f73c-eb92-4998-b58c-3d6faae45361"). InnerVolumeSpecName "kube-api-access-x8vkp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.181046 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6220f73c-eb92-4998-b58c-3d6faae45361" (UID: "6220f73c-eb92-4998-b58c-3d6faae45361"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.232522 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.232577 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220f73c-eb92-4998-b58c-3d6faae45361-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.232595 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8vkp\" (UniqueName: \"kubernetes.io/projected/6220f73c-eb92-4998-b58c-3d6faae45361-kube-api-access-x8vkp\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.353678 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.435293 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-catalog-content\") pod \"9a026553-a5c9-47ea-bce1-4aa730f8f516\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.435358 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fs7q\" (UniqueName: \"kubernetes.io/projected/9a026553-a5c9-47ea-bce1-4aa730f8f516-kube-api-access-2fs7q\") pod \"9a026553-a5c9-47ea-bce1-4aa730f8f516\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.435379 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-utilities\") pod \"9a026553-a5c9-47ea-bce1-4aa730f8f516\" (UID: \"9a026553-a5c9-47ea-bce1-4aa730f8f516\") " Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.436415 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-utilities" (OuterVolumeSpecName: "utilities") pod "9a026553-a5c9-47ea-bce1-4aa730f8f516" (UID: "9a026553-a5c9-47ea-bce1-4aa730f8f516"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.442511 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a026553-a5c9-47ea-bce1-4aa730f8f516-kube-api-access-2fs7q" (OuterVolumeSpecName: "kube-api-access-2fs7q") pod "9a026553-a5c9-47ea-bce1-4aa730f8f516" (UID: "9a026553-a5c9-47ea-bce1-4aa730f8f516"). InnerVolumeSpecName "kube-api-access-2fs7q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.498365 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a026553-a5c9-47ea-bce1-4aa730f8f516" (UID: "9a026553-a5c9-47ea-bce1-4aa730f8f516"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.536720 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.536996 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fs7q\" (UniqueName: \"kubernetes.io/projected/9a026553-a5c9-47ea-bce1-4aa730f8f516-kube-api-access-2fs7q\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.537079 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a026553-a5c9-47ea-bce1-4aa730f8f516-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.589416 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdd2" event={"ID":"04357fd3-79d7-4a5c-b4ba-01e1ff2face4","Type":"ContainerStarted","Data":"596851d12eac218e8fb111cab4c4ba32b77f71bc838b77b2f6237852fdb87ce8"} Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.591942 4943 generic.go:334] "Generic (PLEG): container finished" podID="6220f73c-eb92-4998-b58c-3d6faae45361" containerID="00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb" exitCode=0 Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.592020 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-knbjh" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.592226 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-knbjh" event={"ID":"6220f73c-eb92-4998-b58c-3d6faae45361","Type":"ContainerDied","Data":"00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb"} Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.592367 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-knbjh" event={"ID":"6220f73c-eb92-4998-b58c-3d6faae45361","Type":"ContainerDied","Data":"3cd74aa6987c7d8ec545405717f4462192e87b21d4480f82e63dbb2195904b0d"} Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.592500 4943 scope.go:117] "RemoveContainer" containerID="00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.594429 4943 generic.go:334] "Generic (PLEG): container finished" podID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerID="471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273" exitCode=0 Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.594577 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mxcdj" event={"ID":"9a026553-a5c9-47ea-bce1-4aa730f8f516","Type":"ContainerDied","Data":"471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273"} Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.594698 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mxcdj" event={"ID":"9a026553-a5c9-47ea-bce1-4aa730f8f516","Type":"ContainerDied","Data":"8865ecc97922b5b436a34893516f4c2b7fd4b954ea427000efc81ffb0fc44a5a"} Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.594891 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mxcdj" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.606429 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vxdd2" podStartSLOduration=2.930377704 podStartE2EDuration="1m34.606413581s" podCreationTimestamp="2025-11-29 06:36:10 +0000 UTC" firstStartedPulling="2025-11-29 06:36:12.649718975 +0000 UTC m=+147.579807728" lastFinishedPulling="2025-11-29 06:37:44.325754852 +0000 UTC m=+239.255843605" observedRunningTime="2025-11-29 06:37:44.605601268 +0000 UTC m=+239.535690031" watchObservedRunningTime="2025-11-29 06:37:44.606413581 +0000 UTC m=+239.536502334" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.615193 4943 scope.go:117] "RemoveContainer" containerID="0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.623024 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mxcdj"] Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.626776 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mxcdj"] Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.635779 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-knbjh"] Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.639436 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-knbjh"] Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.645354 4943 scope.go:117] "RemoveContainer" containerID="873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.658575 4943 scope.go:117] "RemoveContainer" containerID="00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb" Nov 29 06:37:44 crc kubenswrapper[4943]: E1129 06:37:44.659085 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb\": container with ID starting with 00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb not found: ID does not exist" containerID="00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.659131 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb"} err="failed to get container status \"00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb\": rpc error: code = NotFound desc = could not find container \"00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb\": container with ID starting with 00f12715b57d5b9a019c070f3c902a4e6267d16a4d56ccead91f2f0896e83afb not found: ID does not exist" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.659159 4943 scope.go:117] "RemoveContainer" containerID="0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5" Nov 29 06:37:44 crc kubenswrapper[4943]: E1129 06:37:44.660049 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5\": container with ID starting with 0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5 not found: ID does 
not exist" containerID="0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.660096 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5"} err="failed to get container status \"0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5\": rpc error: code = NotFound desc = could not find container \"0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5\": container with ID starting with 0f5232309506b77b1894b7bf348acc7f3391b66d70af41fffe6b7904d7ab42c5 not found: ID does not exist" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.660129 4943 scope.go:117] "RemoveContainer" containerID="873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9" Nov 29 06:37:44 crc kubenswrapper[4943]: E1129 06:37:44.660531 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9\": container with ID starting with 873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9 not found: ID does not exist" containerID="873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.660577 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9"} err="failed to get container status \"873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9\": rpc error: code = NotFound desc = could not find container \"873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9\": container with ID starting with 873eae5fdc07803a5e6e6c28a523d5798a3ecc5e997458d901ea5fecc8f383b9 not found: ID does not exist" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.660599 4943 scope.go:117] "RemoveContainer" containerID="471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.674554 4943 scope.go:117] "RemoveContainer" containerID="11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.693287 4943 scope.go:117] "RemoveContainer" containerID="40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.706715 4943 scope.go:117] "RemoveContainer" containerID="471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273" Nov 29 06:37:44 crc kubenswrapper[4943]: E1129 06:37:44.707384 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273\": container with ID starting with 471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273 not found: ID does not exist" containerID="471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.707474 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273"} err="failed to get container status \"471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273\": rpc error: code = NotFound desc = could not find container 
\"471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273\": container with ID starting with 471fac727b3fd883c9d391de626b31b2fa00bf315bc1558a5beef2cc703d7273 not found: ID does not exist" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.707541 4943 scope.go:117] "RemoveContainer" containerID="11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55" Nov 29 06:37:44 crc kubenswrapper[4943]: E1129 06:37:44.708145 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55\": container with ID starting with 11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55 not found: ID does not exist" containerID="11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.708181 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55"} err="failed to get container status \"11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55\": rpc error: code = NotFound desc = could not find container \"11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55\": container with ID starting with 11995ac574ebd25c7f42c171268ed42153863c9b1602e5e163ab6d4531d3de55 not found: ID does not exist" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.708207 4943 scope.go:117] "RemoveContainer" containerID="40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd" Nov 29 06:37:44 crc kubenswrapper[4943]: E1129 06:37:44.708467 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd\": container with ID starting with 40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd not found: ID does not exist" containerID="40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd" Nov 29 06:37:44 crc kubenswrapper[4943]: I1129 06:37:44.708504 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd"} err="failed to get container status \"40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd\": rpc error: code = NotFound desc = could not find container \"40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd\": container with ID starting with 40bd452f614cc9261e68b70cf4bacb8f330e143b2b12a964d9d0e11e065d2bdd not found: ID does not exist" Nov 29 06:37:45 crc kubenswrapper[4943]: I1129 06:37:45.335071 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" path="/var/lib/kubelet/pods/6220f73c-eb92-4998-b58c-3d6faae45361/volumes" Nov 29 06:37:45 crc kubenswrapper[4943]: I1129 06:37:45.335934 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" path="/var/lib/kubelet/pods/9a026553-a5c9-47ea-bce1-4aa730f8f516/volumes" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.281270 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2zm5"] Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.281870 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-q2zm5" 
podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerName="registry-server" containerID="cri-o://2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b" gracePeriod=2 Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.590834 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.615769 4943 generic.go:334] "Generic (PLEG): container finished" podID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerID="2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b" exitCode=0 Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.615817 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2zm5" event={"ID":"86599fb0-5276-47e4-af0d-d7e5c3520d5e","Type":"ContainerDied","Data":"2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b"} Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.615847 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2zm5" event={"ID":"86599fb0-5276-47e4-af0d-d7e5c3520d5e","Type":"ContainerDied","Data":"b44d9a99199847c84d4fa54908c966312a156850266450d84f0e08fd10d99d41"} Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.615867 4943 scope.go:117] "RemoveContainer" containerID="2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.615915 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q2zm5" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.630258 4943 scope.go:117] "RemoveContainer" containerID="2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.642677 4943 scope.go:117] "RemoveContainer" containerID="49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.658445 4943 scope.go:117] "RemoveContainer" containerID="2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b" Nov 29 06:37:46 crc kubenswrapper[4943]: E1129 06:37:46.658908 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b\": container with ID starting with 2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b not found: ID does not exist" containerID="2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.658978 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b"} err="failed to get container status \"2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b\": rpc error: code = NotFound desc = could not find container \"2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b\": container with ID starting with 2e8ec361a8631d04c6d97150e5c62de7c6a390e89c92b4ce4eddb3e45386415b not found: ID does not exist" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.659011 4943 scope.go:117] "RemoveContainer" containerID="2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a" Nov 29 06:37:46 crc kubenswrapper[4943]: E1129 06:37:46.659393 4943 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a\": container with ID starting with 2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a not found: ID does not exist" containerID="2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.659445 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a"} err="failed to get container status \"2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a\": rpc error: code = NotFound desc = could not find container \"2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a\": container with ID starting with 2a4910d124daeb5d7872eed984a2191d3e03a096339142d66619f388814edf1a not found: ID does not exist" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.659478 4943 scope.go:117] "RemoveContainer" containerID="49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e" Nov 29 06:37:46 crc kubenswrapper[4943]: E1129 06:37:46.659918 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e\": container with ID starting with 49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e not found: ID does not exist" containerID="49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.659951 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e"} err="failed to get container status \"49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e\": rpc error: code = NotFound desc = could not find container \"49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e\": container with ID starting with 49b0249b1cb392e3d81ad6fb7776154bf15f4c5443f5c4347b258222fccd938e not found: ID does not exist" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.762647 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skchv\" (UniqueName: \"kubernetes.io/projected/86599fb0-5276-47e4-af0d-d7e5c3520d5e-kube-api-access-skchv\") pod \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.762762 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-utilities\") pod \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.762783 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-catalog-content\") pod \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\" (UID: \"86599fb0-5276-47e4-af0d-d7e5c3520d5e\") " Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.763799 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-utilities" (OuterVolumeSpecName: "utilities") pod "86599fb0-5276-47e4-af0d-d7e5c3520d5e" (UID: 
"86599fb0-5276-47e4-af0d-d7e5c3520d5e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.767928 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86599fb0-5276-47e4-af0d-d7e5c3520d5e-kube-api-access-skchv" (OuterVolumeSpecName: "kube-api-access-skchv") pod "86599fb0-5276-47e4-af0d-d7e5c3520d5e" (UID: "86599fb0-5276-47e4-af0d-d7e5c3520d5e"). InnerVolumeSpecName "kube-api-access-skchv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.779488 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "86599fb0-5276-47e4-af0d-d7e5c3520d5e" (UID: "86599fb0-5276-47e4-af0d-d7e5c3520d5e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.864083 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.864140 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86599fb0-5276-47e4-af0d-d7e5c3520d5e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.864152 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skchv\" (UniqueName: \"kubernetes.io/projected/86599fb0-5276-47e4-af0d-d7e5c3520d5e-kube-api-access-skchv\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.942579 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2zm5"] Nov 29 06:37:46 crc kubenswrapper[4943]: I1129 06:37:46.946794 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2zm5"] Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.334591 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" path="/var/lib/kubelet/pods/86599fb0-5276-47e4-af0d-d7e5c3520d5e/volumes" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.366547 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" podUID="13d43ef8-ebda-4e16-8616-ac9697607054" containerName="oauth-openshift" containerID="cri-o://071f482d3c2b0219db23347af7dade48df3d507cae836d1b4065c8c08f21f742" gracePeriod=15 Nov 29 06:37:47 crc kubenswrapper[4943]: E1129 06:37:47.444907 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13d43ef8_ebda_4e16_8616_ac9697607054.slice/crio-071f482d3c2b0219db23347af7dade48df3d507cae836d1b4065c8c08f21f742.scope\": RecentStats: unable to find data in memory cache]" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.624012 4943 generic.go:334] "Generic (PLEG): container finished" podID="13d43ef8-ebda-4e16-8616-ac9697607054" containerID="071f482d3c2b0219db23347af7dade48df3d507cae836d1b4065c8c08f21f742" exitCode=0 Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.624953 
4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" event={"ID":"13d43ef8-ebda-4e16-8616-ac9697607054","Type":"ContainerDied","Data":"071f482d3c2b0219db23347af7dade48df3d507cae836d1b4065c8c08f21f742"} Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.730802 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.875208 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-login\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.875252 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ghf9\" (UniqueName: \"kubernetes.io/projected/13d43ef8-ebda-4e16-8616-ac9697607054-kube-api-access-8ghf9\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.875279 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-service-ca\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.875325 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-ocp-branding-template\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.875344 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-idp-0-file-data\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.875371 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-serving-cert\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.875752 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/13d43ef8-ebda-4e16-8616-ac9697607054-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.876175 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.875395 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13d43ef8-ebda-4e16-8616-ac9697607054-audit-dir\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.876248 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-trusted-ca-bundle\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.876270 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-provider-selection\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.876787 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.877138 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-router-certs\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.877247 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-cliconfig\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.877268 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-audit-policies\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.877301 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-session\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.877341 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-error\") pod \"13d43ef8-ebda-4e16-8616-ac9697607054\" (UID: \"13d43ef8-ebda-4e16-8616-ac9697607054\") " Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.877547 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.877558 4943 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13d43ef8-ebda-4e16-8616-ac9697607054-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.877584 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.878063 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.878080 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.880938 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.881499 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.881663 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.881910 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.882186 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.882753 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.883341 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.883507 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.883880 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13d43ef8-ebda-4e16-8616-ac9697607054-kube-api-access-8ghf9" (OuterVolumeSpecName: "kube-api-access-8ghf9") pod "13d43ef8-ebda-4e16-8616-ac9697607054" (UID: "13d43ef8-ebda-4e16-8616-ac9697607054"). InnerVolumeSpecName "kube-api-access-8ghf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979058 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979130 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ghf9\" (UniqueName: \"kubernetes.io/projected/13d43ef8-ebda-4e16-8616-ac9697607054-kube-api-access-8ghf9\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979159 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979185 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979213 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979238 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979266 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979291 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979317 4943 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13d43ef8-ebda-4e16-8616-ac9697607054-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979340 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:47 crc kubenswrapper[4943]: I1129 06:37:47.979366 4943 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13d43ef8-ebda-4e16-8616-ac9697607054-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:48 crc kubenswrapper[4943]: I1129 06:37:48.631367 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" event={"ID":"13d43ef8-ebda-4e16-8616-ac9697607054","Type":"ContainerDied","Data":"c7b513b982fcdd267bf8fb9d64467529752d301e1371dd99c558d3d5a78b38f7"} Nov 29 06:37:48 crc kubenswrapper[4943]: I1129 06:37:48.631446 4943 scope.go:117] "RemoveContainer" containerID="071f482d3c2b0219db23347af7dade48df3d507cae836d1b4065c8c08f21f742" Nov 29 06:37:48 crc kubenswrapper[4943]: I1129 06:37:48.631529 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-sdcdn" Nov 29 06:37:48 crc kubenswrapper[4943]: I1129 06:37:48.663840 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-sdcdn"] Nov 29 06:37:48 crc kubenswrapper[4943]: I1129 06:37:48.667656 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-sdcdn"] Nov 29 06:37:49 crc kubenswrapper[4943]: I1129 06:37:49.333795 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13d43ef8-ebda-4e16-8616-ac9697607054" path="/var/lib/kubelet/pods/13d43ef8-ebda-4e16-8616-ac9697607054/volumes" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.200523 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7b554fccb6-ck55z"] Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.200850 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13d43ef8-ebda-4e16-8616-ac9697607054" containerName="oauth-openshift" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.200863 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="13d43ef8-ebda-4e16-8616-ac9697607054" containerName="oauth-openshift" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.200871 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.200895 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.200926 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerName="extract-content" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.200934 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerName="extract-content" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.200949 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af440fe2-c5f0-455c-b063-05e0f710814b" containerName="pruner" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.200954 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="af440fe2-c5f0-455c-b063-05e0f710814b" containerName="pruner" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.200961 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerName="extract-utilities" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.200967 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerName="extract-utilities" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.200977 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="extract-utilities" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.201003 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="extract-utilities" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.201010 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="extract-content" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.201015 4943 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="extract-content" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.201026 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerName="extract-utilities" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.201032 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerName="extract-utilities" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.201039 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.201046 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.201054 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerName="extract-content" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.201079 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerName="extract-content" Nov 29 06:37:50 crc kubenswrapper[4943]: E1129 06:37:50.201089 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.201094 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.201208 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="86599fb0-5276-47e4-af0d-d7e5c3520d5e" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.202335 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="af440fe2-c5f0-455c-b063-05e0f710814b" containerName="pruner" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.202351 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="13d43ef8-ebda-4e16-8616-ac9697607054" containerName="oauth-openshift" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.202361 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a026553-a5c9-47ea-bce1-4aa730f8f516" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.202372 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6220f73c-eb92-4998-b58c-3d6faae45361" containerName="registry-server" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.202778 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.205490 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.205531 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206033 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206086 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206119 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206132 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206193 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206217 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206237 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206288 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.206446 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.211657 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.215094 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7b554fccb6-ck55z"] Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.215338 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.217112 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.229503 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.311766 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " 
pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.311810 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.311834 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq9hz\" (UniqueName: \"kubernetes.io/projected/0510834c-5544-4e3a-ab0e-8950818e1933-kube-api-access-tq9hz\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.311859 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-login\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312250 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-error\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312377 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312422 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-audit-policies\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312648 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312737 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-session\") pod 
\"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312782 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312813 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312892 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0510834c-5544-4e3a-ab0e-8950818e1933-audit-dir\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312916 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.312955 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413514 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-login\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413590 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-error\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413619 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413651 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-audit-policies\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413705 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413736 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-session\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413766 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413791 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413839 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0510834c-5544-4e3a-ab0e-8950818e1933-audit-dir\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413864 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413891 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413919 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413948 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.413970 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq9hz\" (UniqueName: \"kubernetes.io/projected/0510834c-5544-4e3a-ab0e-8950818e1933-kube-api-access-tq9hz\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.414410 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-audit-policies\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.415435 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.415507 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0510834c-5544-4e3a-ab0e-8950818e1933-audit-dir\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.416140 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.416790 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-trusted-ca-bundle\") pod 
\"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.417910 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.417915 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-session\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.418840 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.418950 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.419605 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-error\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.419683 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.421019 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.426756 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0510834c-5544-4e3a-ab0e-8950818e1933-v4-0-config-user-template-login\") pod \"oauth-openshift-7b554fccb6-ck55z\" 
(UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.429910 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq9hz\" (UniqueName: \"kubernetes.io/projected/0510834c-5544-4e3a-ab0e-8950818e1933-kube-api-access-tq9hz\") pod \"oauth-openshift-7b554fccb6-ck55z\" (UID: \"0510834c-5544-4e3a-ab0e-8950818e1933\") " pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.533029 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:50 crc kubenswrapper[4943]: I1129 06:37:50.705405 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7b554fccb6-ck55z"] Nov 29 06:37:51 crc kubenswrapper[4943]: I1129 06:37:51.189849 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:37:51 crc kubenswrapper[4943]: I1129 06:37:51.190221 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:37:51 crc kubenswrapper[4943]: I1129 06:37:51.232269 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:37:51 crc kubenswrapper[4943]: I1129 06:37:51.649441 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" event={"ID":"0510834c-5544-4e3a-ab0e-8950818e1933","Type":"ContainerStarted","Data":"c36d5050597ece3c48a0c91e0e17811c0ddf5412437badd62f681f447ba2504e"} Nov 29 06:37:51 crc kubenswrapper[4943]: I1129 06:37:51.685242 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:37:52 crc kubenswrapper[4943]: I1129 06:37:52.656909 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" event={"ID":"0510834c-5544-4e3a-ab0e-8950818e1933","Type":"ContainerStarted","Data":"d0f6509bdcffcd690e9b484340c8d0c07024cfd1ae83430b159117cbb666221a"} Nov 29 06:37:52 crc kubenswrapper[4943]: I1129 06:37:52.657943 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:52 crc kubenswrapper[4943]: I1129 06:37:52.661755 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" Nov 29 06:37:52 crc kubenswrapper[4943]: I1129 06:37:52.686700 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7b554fccb6-ck55z" podStartSLOduration=30.686681373 podStartE2EDuration="30.686681373s" podCreationTimestamp="2025-11-29 06:37:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:37:52.675420169 +0000 UTC m=+247.605508922" watchObservedRunningTime="2025-11-29 06:37:52.686681373 +0000 UTC m=+247.616770216" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.352739 4943 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 29 06:37:53 crc 
kubenswrapper[4943]: I1129 06:37:53.353668 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.353678 4943 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.353927 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f" gracePeriod=15 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.353973 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb" gracePeriod=15 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.353968 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6" gracePeriod=15 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.353969 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d" gracePeriod=15 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354255 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1" gracePeriod=15 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354406 4943 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 29 06:37:53 crc kubenswrapper[4943]: E1129 06:37:53.354549 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354580 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 29 06:37:53 crc kubenswrapper[4943]: E1129 06:37:53.354598 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354609 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 29 06:37:53 crc kubenswrapper[4943]: E1129 06:37:53.354623 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354630 4943 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 29 06:37:53 crc kubenswrapper[4943]: E1129 06:37:53.354641 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354648 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 29 06:37:53 crc kubenswrapper[4943]: E1129 06:37:53.354664 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354672 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 29 06:37:53 crc kubenswrapper[4943]: E1129 06:37:53.354681 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354687 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354791 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354804 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354812 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354819 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354826 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354837 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 29 06:37:53 crc kubenswrapper[4943]: E1129 06:37:53.354948 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.354961 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.385058 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.453397 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.453443 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.453463 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.453488 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.453518 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.453533 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.453617 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.453638 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.554835 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.554921 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.554946 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.554956 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.554983 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555009 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555039 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555055 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555041 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555092 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555119 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555115 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555013 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555172 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555081 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.555191 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.663199 4943 generic.go:334] "Generic (PLEG): container finished" podID="7431070a-052c-42c5-a80e-f5d6853d7cf2" containerID="beeb610892328381484815f9801c68c057c0d701c5b740910fe94a58c351611d" exitCode=0 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.663282 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7431070a-052c-42c5-a80e-f5d6853d7cf2","Type":"ContainerDied","Data":"beeb610892328381484815f9801c68c057c0d701c5b740910fe94a58c351611d"} Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.664220 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.664603 4943 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.664821 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.665810 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.667168 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.667964 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1" exitCode=0 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.667991 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb" exitCode=0 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.668001 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6" exitCode=0 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.668009 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d" exitCode=2 Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.668070 4943 scope.go:117] "RemoveContainer" containerID="c74b5d99a14405ed36be2f20af44dd2acfd598be2f3bfbaf37e61851e9e24070" Nov 29 06:37:53 crc kubenswrapper[4943]: I1129 06:37:53.681421 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:37:53 crc kubenswrapper[4943]: W1129 06:37:53.701864 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-773393cd5871ab9b0a35f3d06c413edd5ccbeb5ad0c79d7bcfb302f36edcca81 WatchSource:0}: Error finding container 773393cd5871ab9b0a35f3d06c413edd5ccbeb5ad0c79d7bcfb302f36edcca81: Status 404 returned error can't find the container with id 773393cd5871ab9b0a35f3d06c413edd5ccbeb5ad0c79d7bcfb302f36edcca81 Nov 29 06:37:53 crc kubenswrapper[4943]: E1129 06:37:53.704010 4943 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.148:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c66e1ba6b0408 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-29 06:37:53.70364212 +0000 UTC m=+248.633730873,LastTimestamp:2025-11-29 06:37:53.70364212 +0000 UTC m=+248.633730873,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.674716 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"773393cd5871ab9b0a35f3d06c413edd5ccbeb5ad0c79d7bcfb302f36edcca81"} Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.678395 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.879277 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.880466 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.880890 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.881253 4943 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.972376 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-kubelet-dir\") pod \"7431070a-052c-42c5-a80e-f5d6853d7cf2\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.972474 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-var-lock\") pod \"7431070a-052c-42c5-a80e-f5d6853d7cf2\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.972500 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7431070a-052c-42c5-a80e-f5d6853d7cf2" (UID: "7431070a-052c-42c5-a80e-f5d6853d7cf2"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.972518 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7431070a-052c-42c5-a80e-f5d6853d7cf2-kube-api-access\") pod \"7431070a-052c-42c5-a80e-f5d6853d7cf2\" (UID: \"7431070a-052c-42c5-a80e-f5d6853d7cf2\") " Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.972601 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-var-lock" (OuterVolumeSpecName: "var-lock") pod "7431070a-052c-42c5-a80e-f5d6853d7cf2" (UID: "7431070a-052c-42c5-a80e-f5d6853d7cf2"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.972928 4943 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-var-lock\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.972946 4943 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7431070a-052c-42c5-a80e-f5d6853d7cf2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:54 crc kubenswrapper[4943]: I1129 06:37:54.978144 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7431070a-052c-42c5-a80e-f5d6853d7cf2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7431070a-052c-42c5-a80e-f5d6853d7cf2" (UID: "7431070a-052c-42c5-a80e-f5d6853d7cf2"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.074540 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7431070a-052c-42c5-a80e-f5d6853d7cf2-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.329464 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.329937 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.330416 4943 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.330764 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.331024 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.331266 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: E1129 06:37:55.331520 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-p7m24" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.331541 4943 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.685586 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e"} Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.686105 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.686703 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.687129 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.687186 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7431070a-052c-42c5-a80e-f5d6853d7cf2","Type":"ContainerDied","Data":"f420b3e53173a3a57d70f04d692a56cabc7d0d27ff308ff2c683764a45b54af8"} Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.687204 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f420b3e53173a3a57d70f04d692a56cabc7d0d27ff308ff2c683764a45b54af8" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.687224 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.689896 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.690603 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f" exitCode=0 Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.690658 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b68558364c0d8f3a377958b943acb23b41614b94e6fb780ae779bb1259f0e0b7" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.737469 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.738162 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.738698 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.741070 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.742009 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.742514 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.743013 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.743424 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.743759 4943 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.782713 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.782833 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.782900 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.782922 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.782995 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.783041 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.783213 4943 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.783229 4943 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:55 crc kubenswrapper[4943]: I1129 06:37:55.783238 4943 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.327403 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.327798 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.328156 4943 status_manager.go:851] "Failed to get status for pod" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" pod="openshift-marketplace/redhat-operators-dt5vl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dt5vl\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.328462 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.328813 4943 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: E1129 06:37:56.329184 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-dt5vl" 
podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.695847 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.709500 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.709715 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.709885 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.710068 4943 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:56 crc kubenswrapper[4943]: I1129 06:37:56.710238 4943 status_manager.go:851] "Failed to get status for pod" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" pod="openshift-marketplace/redhat-operators-dt5vl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dt5vl\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:37:57 crc kubenswrapper[4943]: I1129 06:37:57.333225 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 29 06:37:58 crc kubenswrapper[4943]: E1129 06:37:58.197404 4943 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.148:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c66e1ba6b0408 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-29 06:37:53.70364212 +0000 UTC m=+248.633730873,LastTimestamp:2025-11-29 06:37:53.70364212 +0000 UTC 
m=+248.633730873,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 29 06:38:00 crc kubenswrapper[4943]: E1129 06:38:00.825916 4943 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:00 crc kubenswrapper[4943]: E1129 06:38:00.826505 4943 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:00 crc kubenswrapper[4943]: E1129 06:38:00.826776 4943 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:00 crc kubenswrapper[4943]: E1129 06:38:00.826990 4943 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:00 crc kubenswrapper[4943]: E1129 06:38:00.827196 4943 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:00 crc kubenswrapper[4943]: I1129 06:38:00.827221 4943 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 29 06:38:00 crc kubenswrapper[4943]: E1129 06:38:00.827609 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="200ms" Nov 29 06:38:01 crc kubenswrapper[4943]: E1129 06:38:01.028978 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="400ms" Nov 29 06:38:01 crc kubenswrapper[4943]: E1129 06:38:01.430304 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="800ms" Nov 29 06:38:02 crc kubenswrapper[4943]: E1129 06:38:02.231653 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="1.6s" Nov 29 06:38:03 crc kubenswrapper[4943]: E1129 06:38:03.832957 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="3.2s" Nov 29 06:38:05 crc kubenswrapper[4943]: I1129 06:38:05.329472 4943 
status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:05 crc kubenswrapper[4943]: I1129 06:38:05.330056 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:05 crc kubenswrapper[4943]: I1129 06:38:05.330386 4943 status_manager.go:851] "Failed to get status for pod" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" pod="openshift-marketplace/redhat-operators-dt5vl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dt5vl\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:05 crc kubenswrapper[4943]: I1129 06:38:05.330673 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.327089 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.328163 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.328752 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.329269 4943 status_manager.go:851] "Failed to get status for pod" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" pod="openshift-marketplace/redhat-operators-dt5vl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dt5vl\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.329525 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.331662 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.331975 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.332235 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.332450 4943 status_manager.go:851] "Failed to get status for pod" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" pod="openshift-marketplace/redhat-operators-dt5vl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dt5vl\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.341586 4943 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.341632 4943 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:06 crc kubenswrapper[4943]: E1129 06:38:06.342078 4943 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.342501 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:06 crc kubenswrapper[4943]: I1129 06:38:06.746769 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f44212c7e423d64f3441811a1354b5a7e2a8ea7bd37077a53b4843a9d2ed4950"} Nov 29 06:38:07 crc kubenswrapper[4943]: E1129 06:38:07.036644 4943 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="6.4s" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.753324 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7m24" event={"ID":"41b233a1-b997-493b-a71c-e4b9bd816479","Type":"ContainerStarted","Data":"67d9576d7a4c842f1ad09562f347bb36423d2b2086884a6fc58d34f54212b1fa"} Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.755083 4943 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="f1d81584e0f189df56fc73dc13281f012c1ed1990fdac8bbe4c76b336db024cb" exitCode=0 Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.755213 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"f1d81584e0f189df56fc73dc13281f012c1ed1990fdac8bbe4c76b336db024cb"} Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.755666 4943 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.755686 4943 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:07 crc kubenswrapper[4943]: E1129 06:38:07.756134 4943 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.756330 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.756596 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.756839 4943 status_manager.go:851] "Failed to get status for pod" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" pod="openshift-marketplace/redhat-operators-dt5vl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dt5vl\": dial tcp 38.102.83.148:6443: connect: 
connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.757242 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.758058 4943 status_manager.go:851] "Failed to get status for pod" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.758407 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.758851 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.759203 4943 status_manager.go:851] "Failed to get status for pod" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" pod="openshift-marketplace/redhat-operators-dt5vl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dt5vl\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.761531 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.761610 4943 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a" exitCode=1 Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.761643 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a"} Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.761996 4943 scope.go:117] "RemoveContainer" containerID="d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.762394 4943 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.762784 4943 status_manager.go:851] "Failed to get status for pod" 
podUID="41b233a1-b997-493b-a71c-e4b9bd816479" pod="openshift-marketplace/redhat-operators-p7m24" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7m24\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.763095 4943 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.763488 4943 status_manager.go:851] "Failed to get status for pod" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:07 crc kubenswrapper[4943]: I1129 06:38:07.763876 4943 status_manager.go:851] "Failed to get status for pod" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" pod="openshift-marketplace/redhat-operators-dt5vl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dt5vl\": dial tcp 38.102.83.148:6443: connect: connection refused" Nov 29 06:38:08 crc kubenswrapper[4943]: E1129 06:38:08.198872 4943 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.148:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c66e1ba6b0408 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-29 06:37:53.70364212 +0000 UTC m=+248.633730873,LastTimestamp:2025-11-29 06:37:53.70364212 +0000 UTC m=+248.633730873,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 29 06:38:08 crc kubenswrapper[4943]: I1129 06:38:08.770430 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 29 06:38:08 crc kubenswrapper[4943]: I1129 06:38:08.770904 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"88a5ea13f88f3b72fa3dac914e9ab3750bf84e697ea5684ee5ac5a52262980c0"} Nov 29 06:38:08 crc kubenswrapper[4943]: I1129 06:38:08.775725 4943 generic.go:334] "Generic (PLEG): container finished" podID="41b233a1-b997-493b-a71c-e4b9bd816479" containerID="67d9576d7a4c842f1ad09562f347bb36423d2b2086884a6fc58d34f54212b1fa" exitCode=0 Nov 29 06:38:08 crc 
kubenswrapper[4943]: I1129 06:38:08.775816 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7m24" event={"ID":"41b233a1-b997-493b-a71c-e4b9bd816479","Type":"ContainerDied","Data":"67d9576d7a4c842f1ad09562f347bb36423d2b2086884a6fc58d34f54212b1fa"} Nov 29 06:38:08 crc kubenswrapper[4943]: I1129 06:38:08.780868 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d49b283a7f3fc107bd0331fca24a01abc4cf9fa7e64f8e994cb08896fab9e628"} Nov 29 06:38:08 crc kubenswrapper[4943]: I1129 06:38:08.780915 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"636e9a714373cda4849b270619e124fc1b451337f9a81115bc01b2fca6f45b1b"} Nov 29 06:38:09 crc kubenswrapper[4943]: I1129 06:38:09.788220 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7m24" event={"ID":"41b233a1-b997-493b-a71c-e4b9bd816479","Type":"ContainerStarted","Data":"ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0"} Nov 29 06:38:09 crc kubenswrapper[4943]: I1129 06:38:09.792016 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8d8a9e0554caf5e3057df2dfe78e7ac82086efb2807fde457d2643ce1d0d99a2"} Nov 29 06:38:09 crc kubenswrapper[4943]: I1129 06:38:09.792048 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4f639c44e53d81bbbf5ffbc14bb5695e18e93d666c3ca7fb52bcda921c75cb07"} Nov 29 06:38:09 crc kubenswrapper[4943]: I1129 06:38:09.792058 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7dc9b41b66620aa3e4120db1e715dcd9b87fc5f8d3dd4bfd8522c7570d42d608"} Nov 29 06:38:09 crc kubenswrapper[4943]: I1129 06:38:09.792250 4943 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:09 crc kubenswrapper[4943]: I1129 06:38:09.792267 4943 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:09 crc kubenswrapper[4943]: I1129 06:38:09.792450 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:11 crc kubenswrapper[4943]: I1129 06:38:11.343113 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:11 crc kubenswrapper[4943]: I1129 06:38:11.343766 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:11 crc kubenswrapper[4943]: I1129 06:38:11.349227 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:11 crc kubenswrapper[4943]: I1129 06:38:11.805194 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dt5vl" 
event={"ID":"c48a9e5e-033e-465e-a3e9-c474245e7b0d","Type":"ContainerStarted","Data":"410d3fd0899b7fa9f5c9379b881b10669cafcdee60defc2ea7f6d19a0285107d"} Nov 29 06:38:12 crc kubenswrapper[4943]: I1129 06:38:12.812865 4943 generic.go:334] "Generic (PLEG): container finished" podID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerID="410d3fd0899b7fa9f5c9379b881b10669cafcdee60defc2ea7f6d19a0285107d" exitCode=0 Nov 29 06:38:12 crc kubenswrapper[4943]: I1129 06:38:12.812911 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dt5vl" event={"ID":"c48a9e5e-033e-465e-a3e9-c474245e7b0d","Type":"ContainerDied","Data":"410d3fd0899b7fa9f5c9379b881b10669cafcdee60defc2ea7f6d19a0285107d"} Nov 29 06:38:12 crc kubenswrapper[4943]: I1129 06:38:12.852284 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:38:14 crc kubenswrapper[4943]: I1129 06:38:14.679916 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:38:14 crc kubenswrapper[4943]: I1129 06:38:14.680272 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:38:14 crc kubenswrapper[4943]: I1129 06:38:14.804581 4943 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:14 crc kubenswrapper[4943]: I1129 06:38:14.824690 4943 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:14 crc kubenswrapper[4943]: I1129 06:38:14.824929 4943 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:14 crc kubenswrapper[4943]: I1129 06:38:14.828226 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:15 crc kubenswrapper[4943]: I1129 06:38:15.352362 4943 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="f1d7930a-7c6c-4cdf-a079-5784c8bc7d8c" Nov 29 06:38:15 crc kubenswrapper[4943]: I1129 06:38:15.717513 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p7m24" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="registry-server" probeResult="failure" output=< Nov 29 06:38:15 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 06:38:15 crc kubenswrapper[4943]: > Nov 29 06:38:15 crc kubenswrapper[4943]: I1129 06:38:15.831140 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dt5vl" event={"ID":"c48a9e5e-033e-465e-a3e9-c474245e7b0d","Type":"ContainerStarted","Data":"ee3e0f918dc0d5d45a96090bc4e867c8b0e32dbdcd2ea5e41a33f2c04f0f2d35"} Nov 29 06:38:15 crc kubenswrapper[4943]: I1129 06:38:15.831396 4943 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:15 crc kubenswrapper[4943]: I1129 06:38:15.831417 4943 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cfcab1db-586e-49c2-99e1-7886b7d75e47" Nov 29 06:38:15 crc 
kubenswrapper[4943]: I1129 06:38:15.834225 4943 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="f1d7930a-7c6c-4cdf-a079-5784c8bc7d8c" Nov 29 06:38:17 crc kubenswrapper[4943]: I1129 06:38:17.454095 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:38:17 crc kubenswrapper[4943]: I1129 06:38:17.454335 4943 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 29 06:38:17 crc kubenswrapper[4943]: I1129 06:38:17.455594 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 29 06:38:24 crc kubenswrapper[4943]: I1129 06:38:24.721037 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:38:24 crc kubenswrapper[4943]: I1129 06:38:24.759648 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:38:24 crc kubenswrapper[4943]: I1129 06:38:24.772855 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:38:24 crc kubenswrapper[4943]: I1129 06:38:24.772921 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:38:24 crc kubenswrapper[4943]: I1129 06:38:24.809654 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:38:24 crc kubenswrapper[4943]: I1129 06:38:24.915485 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 29 06:38:24 crc kubenswrapper[4943]: I1129 06:38:24.916325 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:38:25 crc kubenswrapper[4943]: I1129 06:38:25.310008 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 29 06:38:25 crc kubenswrapper[4943]: I1129 06:38:25.323739 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 29 06:38:25 crc kubenswrapper[4943]: I1129 06:38:25.615866 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 29 06:38:25 crc kubenswrapper[4943]: I1129 06:38:25.809807 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 29 06:38:26 crc kubenswrapper[4943]: I1129 06:38:26.207423 4943 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 29 06:38:26 crc kubenswrapper[4943]: I1129 06:38:26.508687 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 29 06:38:26 crc kubenswrapper[4943]: I1129 06:38:26.775993 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 29 06:38:26 crc kubenswrapper[4943]: I1129 06:38:26.840081 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 29 06:38:26 crc kubenswrapper[4943]: I1129 06:38:26.864627 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 29 06:38:26 crc kubenswrapper[4943]: I1129 06:38:26.991285 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.200179 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.434086 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.443545 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.453699 4943 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.453741 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.644305 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.647867 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.654768 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.672631 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.709958 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.722957 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 29 06:38:27 crc 
Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.840712 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 29 06:38:27 crc kubenswrapper[4943]: I1129 06:38:27.963759 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.079236 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.091238 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.151419 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.189969 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.239706 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.353944 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.380458 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.554438 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.564820 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.572070 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.645746 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.745059 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.745637 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.770820 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.802164 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 29 06:38:28 crc kubenswrapper[4943]: I1129 06:38:28.811467 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 29 06:38:29 crc kubenswrapper[4943]: I1129 06:38:29.019703 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 29 06:38:29 crc kubenswrapper[4943]: I1129 06:38:29.020854 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 29 06:38:29 crc kubenswrapper[4943]: I1129 06:38:29.023547 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 29 06:38:29 crc kubenswrapper[4943]: I1129 06:38:29.028290 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 29 06:38:29 crc kubenswrapper[4943]: I1129 06:38:29.396257 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.403441 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.429180 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.529917 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.542886 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.552798 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.581252 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.691313 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.749446 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.793131 4943 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.794494 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p7m24" podStartSLOduration=22.263419576 podStartE2EDuration="2m15.794473112s" podCreationTimestamp="2025-11-29 06:36:14 +0000 UTC" firstStartedPulling="2025-11-29 06:36:15.977554798 +0000 UTC m=+150.907643551" lastFinishedPulling="2025-11-29 06:38:09.508608334 +0000 UTC m=+264.438697087" observedRunningTime="2025-11-29 06:38:14.778806757 +0000 UTC m=+269.708895530" watchObservedRunningTime="2025-11-29 06:38:29.794473112 +0000 UTC m=+284.724561865" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.795961 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=36.795953358 podStartE2EDuration="36.795953358s" 
podCreationTimestamp="2025-11-29 06:37:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:38:14.712824517 +0000 UTC m=+269.642913260" watchObservedRunningTime="2025-11-29 06:38:29.795953358 +0000 UTC m=+284.726042111" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.797830 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dt5vl" podStartSLOduration=16.385569015 podStartE2EDuration="2m15.797822361s" podCreationTimestamp="2025-11-29 06:36:14 +0000 UTC" firstStartedPulling="2025-11-29 06:36:15.870981573 +0000 UTC m=+150.801070326" lastFinishedPulling="2025-11-29 06:38:15.283234919 +0000 UTC m=+270.213323672" observedRunningTime="2025-11-29 06:38:15.845963893 +0000 UTC m=+270.776052656" watchObservedRunningTime="2025-11-29 06:38:29.797822361 +0000 UTC m=+284.727911114" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.798556 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.798626 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.801957 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.815543 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=15.81552531 podStartE2EDuration="15.81552531s" podCreationTimestamp="2025-11-29 06:38:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:38:29.81204193 +0000 UTC m=+284.742130683" watchObservedRunningTime="2025-11-29 06:38:29.81552531 +0000 UTC m=+284.745614063" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.851295 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:29.908210 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.004878 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.244913 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.333220 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.333619 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.336909 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.424074 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 29 06:38:34 crc 
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.479998 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.544315 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.587730 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.594214 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.617941 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.621940 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.878221 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.897514 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.899658 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.907721 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.924599 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:30.994888 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.164791 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.184166 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.194467 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.196117 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.198775 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.207760 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
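Each "Caches populated" record above marks a client-go reflector finishing its initial LIST for one resource type scoped to one namespace object. A minimal sketch of the same machinery with a shared informer factory (assumes in-cluster credentials; the resync interval is arbitrary):

    // caches.go — hedged sketch of the informer/reflector cache-sync pattern.
    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/client-go/informers"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	cfg, err := rest.InClusterConfig()
    	if err != nil {
    		panic(err)
    	}
    	client := kubernetes.NewForConfigOrDie(cfg)

    	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
    	cmInformer := factory.Core().V1().ConfigMaps().Informer()

    	stop := make(chan struct{})
    	defer close(stop)
    	factory.Start(stop)

    	// Blocks until the initial LIST is reflected into the local cache —
    	// the moment at which the kubelet logs "Caches populated".
    	if cache.WaitForCacheSync(stop, cmInformer.HasSynced) {
    		fmt.Println("caches populated for *v1.ConfigMap")
    	}
    }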
object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.251412 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.264805 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.299065 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.319836 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.352109 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.432951 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.473853 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.485676 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.505835 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.521695 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.571986 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.652313 4943 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.653511 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.671760 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.894825 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.913643 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.936152 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:31.946785 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.069288 4943 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.099618 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.131793 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.132703 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.300008 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.403592 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.410336 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.427866 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.474491 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.483026 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.490812 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.491785 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.557303 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.569112 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.610887 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.752822 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.810809 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.833382 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.877373 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.889809 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 29 06:38:34 crc 
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:32.989863 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.155989 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.331486 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.344651 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.348041 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.415294 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.575094 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.582324 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.587535 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.633613 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.665076 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.704363 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.839735 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.884279 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.884413 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.908619 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:33.922993 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.129356 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.208450 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.219586 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.296096 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.536341 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.545787 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.549765 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.552954 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.575939 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.660739 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.727276 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.796802 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.851304 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 29 06:38:34 crc kubenswrapper[4943]: I1129 06:38:34.965776 4943 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.121440 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.139435 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.262941 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.262964 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.263157 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.286777 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.292782 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.322131 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.349122 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.400704 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.426684 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.480885 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.581664 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.694727 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.713232 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.796826 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.853287 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.866245 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.912958 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.930279 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 29 06:38:35 crc kubenswrapper[4943]: I1129 06:38:35.961383 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.184869 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.228784 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.258280 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.342682 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.397255 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.428118 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.453050 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.637947 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.641696 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.783587 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.848611 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.866000 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.872161 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.908932 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.915750 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.921111 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 29 06:38:36 crc kubenswrapper[4943]: I1129 06:38:36.951394 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.023898 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.044518 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.094544 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.114100 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.144773 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.182506 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.266862 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
object-"openshift-console"/"console-dockercfg-f62pw" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.353983 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.386793 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.406138 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.406807 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.414539 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.430054 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.451383 4943 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.451633 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e" gracePeriod=5 Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.454308 4943 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.454384 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.454447 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.455128 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"88a5ea13f88f3b72fa3dac914e9ab3750bf84e697ea5684ee5ac5a52262980c0"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.455232 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" 
containerID="cri-o://88a5ea13f88f3b72fa3dac914e9ab3750bf84e697ea5684ee5ac5a52262980c0" gracePeriod=30 Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.603898 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.664902 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.701920 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.748625 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.753667 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.818533 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.819180 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.851977 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.968093 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.981977 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 29 06:38:37 crc kubenswrapper[4943]: I1129 06:38:37.988941 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.011967 4943 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.136029 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.143206 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.162401 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.304242 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.407394 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.407868 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.445537 4943 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-dns"/"openshift-service-ca.crt" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.445888 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.547709 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.679811 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.692826 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.866777 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.895058 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 29 06:38:38 crc kubenswrapper[4943]: I1129 06:38:38.988005 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.017340 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.142602 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.146248 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.173000 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.186496 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.370687 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.490032 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.490996 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.577644 4943 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.587409 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.666953 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.719994 4943 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.730983 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.869530 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.884292 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.962920 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 29 06:38:39 crc kubenswrapper[4943]: I1129 06:38:39.982645 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 29 06:38:40 crc kubenswrapper[4943]: I1129 06:38:40.250053 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 29 06:38:40 crc kubenswrapper[4943]: I1129 06:38:40.765534 4943 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 29 06:38:40 crc kubenswrapper[4943]: I1129 06:38:40.898368 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 29 06:38:41 crc kubenswrapper[4943]: I1129 06:38:41.929314 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 29 06:38:42 crc kubenswrapper[4943]: I1129 06:38:42.073680 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 29 06:38:42 crc kubenswrapper[4943]: I1129 06:38:42.584871 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 29 06:38:42 crc kubenswrapper[4943]: I1129 06:38:42.926438 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.654529 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.655212 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.797635 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.798012 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.798145 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.798258 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.798369 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.797773 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.798044 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.798198 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.798523 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.798994 4943 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.799098 4943 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.799179 4943 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.799270 4943 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.806689 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.900926 4943 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.975777 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.975841 4943 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e" exitCode=137 Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.975886 4943 scope.go:117] "RemoveContainer" containerID="5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.975992 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 06:38:43 crc kubenswrapper[4943]: I1129 06:38:43.998743 4943 scope.go:117] "RemoveContainer" containerID="5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e" Nov 29 06:38:44 crc kubenswrapper[4943]: E1129 06:38:43.999681 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e\": container with ID starting with 5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e not found: ID does not exist" containerID="5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e" Nov 29 06:38:44 crc kubenswrapper[4943]: I1129 06:38:43.999733 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e"} err="failed to get container status \"5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e\": rpc error: code = NotFound desc = could not find container \"5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e\": container with ID starting with 5186cdc21564f0d4496db28d24e48025fe42fd40ae6090da19b1bd4c07e6156e not found: ID does not exist" Nov 29 06:38:45 crc kubenswrapper[4943]: I1129 06:38:45.347908 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 29 06:38:45 crc kubenswrapper[4943]: I1129 06:38:45.348196 4943 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 29 06:38:45 crc kubenswrapper[4943]: I1129 06:38:45.362044 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 29 06:38:45 crc kubenswrapper[4943]: I1129 06:38:45.362330 4943 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="7b10d8d6-ce65-4f8f-9b91-c9b3a5631df9" Nov 29 06:38:45 crc kubenswrapper[4943]: I1129 06:38:45.367834 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 29 06:38:45 crc kubenswrapper[4943]: I1129 06:38:45.367888 4943 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="7b10d8d6-ce65-4f8f-9b91-c9b3a5631df9" Nov 29 06:38:55 crc kubenswrapper[4943]: I1129 06:38:55.105538 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 29 06:39:05 crc kubenswrapper[4943]: I1129 06:39:05.196823 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-t7n65"] Nov 29 06:39:05 crc kubenswrapper[4943]: I1129 06:39:05.197661 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" podUID="55652a9d-fd7e-4c49-993e-bf1eff1d57f9" containerName="controller-manager" containerID="cri-o://478e0e9eae6f51214fb97a18269ac27724149ce49145c21882b6c92cee5130da" gracePeriod=30 Nov 29 06:39:05 crc kubenswrapper[4943]: I1129 06:39:05.204496 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"] Nov 29 06:39:05 crc kubenswrapper[4943]: I1129 06:39:05.204717 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" podUID="1c7844d3-bb90-4f62-8aa2-fe2b64f92343" containerName="route-controller-manager" containerID="cri-o://786dd28d0703d7e7e933f33cf6f7bf1d211944484d939ef59722aec5776554e4" gracePeriod=30 Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.100848 4943 generic.go:334] "Generic (PLEG): container finished" podID="55652a9d-fd7e-4c49-993e-bf1eff1d57f9" containerID="478e0e9eae6f51214fb97a18269ac27724149ce49145c21882b6c92cee5130da" exitCode=0 Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.101043 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" event={"ID":"55652a9d-fd7e-4c49-993e-bf1eff1d57f9","Type":"ContainerDied","Data":"478e0e9eae6f51214fb97a18269ac27724149ce49145c21882b6c92cee5130da"} Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.102841 4943 generic.go:334] "Generic (PLEG): container finished" podID="1c7844d3-bb90-4f62-8aa2-fe2b64f92343" containerID="786dd28d0703d7e7e933f33cf6f7bf1d211944484d939ef59722aec5776554e4" exitCode=0 Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.102874 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" event={"ID":"1c7844d3-bb90-4f62-8aa2-fe2b64f92343","Type":"ContainerDied","Data":"786dd28d0703d7e7e933f33cf6f7bf1d211944484d939ef59722aec5776554e4"} Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.282688 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.288100 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.322507 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc"] Nov 29 06:39:06 crc kubenswrapper[4943]: E1129 06:39:06.322831 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" containerName="installer" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.322848 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" containerName="installer" Nov 29 06:39:06 crc kubenswrapper[4943]: E1129 06:39:06.322868 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c7844d3-bb90-4f62-8aa2-fe2b64f92343" containerName="route-controller-manager" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.322876 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c7844d3-bb90-4f62-8aa2-fe2b64f92343" containerName="route-controller-manager" Nov 29 06:39:06 crc kubenswrapper[4943]: E1129 06:39:06.322891 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.322899 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 29 06:39:06 crc kubenswrapper[4943]: E1129 06:39:06.322917 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55652a9d-fd7e-4c49-993e-bf1eff1d57f9" containerName="controller-manager" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.322924 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="55652a9d-fd7e-4c49-993e-bf1eff1d57f9" containerName="controller-manager" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.323033 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="7431070a-052c-42c5-a80e-f5d6853d7cf2" containerName="installer" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.323047 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.323061 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="55652a9d-fd7e-4c49-993e-bf1eff1d57f9" containerName="controller-manager" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.323072 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c7844d3-bb90-4f62-8aa2-fe2b64f92343" containerName="route-controller-manager" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.324039 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.339695 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc"] Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423285 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-client-ca\") pod \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423408 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-config\") pod \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423429 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-config\") pod \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423660 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qff9\" (UniqueName: \"kubernetes.io/projected/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-kube-api-access-7qff9\") pod \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423718 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-proxy-ca-bundles\") pod \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423762 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-452vx\" (UniqueName: \"kubernetes.io/projected/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-kube-api-access-452vx\") pod \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423798 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-client-ca\") pod \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423831 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-serving-cert\") pod \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\" (UID: \"1c7844d3-bb90-4f62-8aa2-fe2b64f92343\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.423861 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-serving-cert\") pod \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\" (UID: \"55652a9d-fd7e-4c49-993e-bf1eff1d57f9\") " Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424126 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dphx\" (UniqueName: \"kubernetes.io/projected/36818827-2b96-4281-8f55-4a0122f5f3e2-kube-api-access-5dphx\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424187 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36818827-2b96-4281-8f55-4a0122f5f3e2-serving-cert\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424239 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-client-ca\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424268 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-proxy-ca-bundles\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424339 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-client-ca" (OuterVolumeSpecName: "client-ca") pod "1c7844d3-bb90-4f62-8aa2-fe2b64f92343" (UID: "1c7844d3-bb90-4f62-8aa2-fe2b64f92343"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424359 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-config" (OuterVolumeSpecName: "config") pod "1c7844d3-bb90-4f62-8aa2-fe2b64f92343" (UID: "1c7844d3-bb90-4f62-8aa2-fe2b64f92343"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424372 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-config\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424425 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424438 4943 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424529 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-config" (OuterVolumeSpecName: "config") pod "55652a9d-fd7e-4c49-993e-bf1eff1d57f9" (UID: "55652a9d-fd7e-4c49-993e-bf1eff1d57f9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424875 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-client-ca" (OuterVolumeSpecName: "client-ca") pod "55652a9d-fd7e-4c49-993e-bf1eff1d57f9" (UID: "55652a9d-fd7e-4c49-993e-bf1eff1d57f9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.424918 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "55652a9d-fd7e-4c49-993e-bf1eff1d57f9" (UID: "55652a9d-fd7e-4c49-993e-bf1eff1d57f9"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.430624 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1c7844d3-bb90-4f62-8aa2-fe2b64f92343" (UID: "1c7844d3-bb90-4f62-8aa2-fe2b64f92343"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.431393 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-kube-api-access-452vx" (OuterVolumeSpecName: "kube-api-access-452vx") pod "55652a9d-fd7e-4c49-993e-bf1eff1d57f9" (UID: "55652a9d-fd7e-4c49-993e-bf1eff1d57f9"). InnerVolumeSpecName "kube-api-access-452vx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.431656 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-kube-api-access-7qff9" (OuterVolumeSpecName: "kube-api-access-7qff9") pod "1c7844d3-bb90-4f62-8aa2-fe2b64f92343" (UID: "1c7844d3-bb90-4f62-8aa2-fe2b64f92343"). 
InnerVolumeSpecName "kube-api-access-7qff9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.435875 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "55652a9d-fd7e-4c49-993e-bf1eff1d57f9" (UID: "55652a9d-fd7e-4c49-993e-bf1eff1d57f9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525580 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-config\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525645 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dphx\" (UniqueName: \"kubernetes.io/projected/36818827-2b96-4281-8f55-4a0122f5f3e2-kube-api-access-5dphx\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525672 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36818827-2b96-4281-8f55-4a0122f5f3e2-serving-cert\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525698 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-client-ca\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525717 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-proxy-ca-bundles\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525771 4943 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525784 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-452vx\" (UniqueName: \"kubernetes.io/projected/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-kube-api-access-452vx\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525794 4943 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525803 4943 reconciler_common.go:293] "Volume detached for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525811 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525821 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55652a9d-fd7e-4c49-993e-bf1eff1d57f9-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.525831 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qff9\" (UniqueName: \"kubernetes.io/projected/1c7844d3-bb90-4f62-8aa2-fe2b64f92343-kube-api-access-7qff9\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.527012 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-client-ca\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.527336 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-proxy-ca-bundles\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.527368 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-config\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.529367 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36818827-2b96-4281-8f55-4a0122f5f3e2-serving-cert\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.547380 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dphx\" (UniqueName: \"kubernetes.io/projected/36818827-2b96-4281-8f55-4a0122f5f3e2-kube-api-access-5dphx\") pod \"controller-manager-67fd5fbfc5-5sgsc\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:06 crc kubenswrapper[4943]: I1129 06:39:06.644003 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.025637 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc"] Nov 29 06:39:07 crc kubenswrapper[4943]: W1129 06:39:07.030452 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36818827_2b96_4281_8f55_4a0122f5f3e2.slice/crio-88bfe1fe45f1d2e77d761f9eead50575ce1f04d6ea21307f2ae2272836f6379f WatchSource:0}: Error finding container 88bfe1fe45f1d2e77d761f9eead50575ce1f04d6ea21307f2ae2272836f6379f: Status 404 returned error can't find the container with id 88bfe1fe45f1d2e77d761f9eead50575ce1f04d6ea21307f2ae2272836f6379f Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.114776 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" event={"ID":"55652a9d-fd7e-4c49-993e-bf1eff1d57f9","Type":"ContainerDied","Data":"a9d4d7657be546c457f1ac79c601263cad2d54254d7d60d7d6628182dc2f3e76"} Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.115254 4943 scope.go:117] "RemoveContainer" containerID="478e0e9eae6f51214fb97a18269ac27724149ce49145c21882b6c92cee5130da" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.115038 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-t7n65" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.116727 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" event={"ID":"36818827-2b96-4281-8f55-4a0122f5f3e2","Type":"ContainerStarted","Data":"88bfe1fe45f1d2e77d761f9eead50575ce1f04d6ea21307f2ae2272836f6379f"} Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.121956 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" event={"ID":"1c7844d3-bb90-4f62-8aa2-fe2b64f92343","Type":"ContainerDied","Data":"bfda611bd70f032dde8678d26dc4208d2493dda29b6857445203d133ccb3e852"} Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.122024 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.137700 4943 scope.go:117] "RemoveContainer" containerID="786dd28d0703d7e7e933f33cf6f7bf1d211944484d939ef59722aec5776554e4" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.151447 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-t7n65"] Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.167332 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-t7n65"] Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.174377 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"] Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.178625 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-hwkpw"] Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.292235 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l"] Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.292957 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.296765 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.297184 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.298779 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.298834 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.299201 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.305417 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.306014 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l"] Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.335514 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c7844d3-bb90-4f62-8aa2-fe2b64f92343" path="/var/lib/kubelet/pods/1c7844d3-bb90-4f62-8aa2-fe2b64f92343/volumes" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.336029 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55652a9d-fd7e-4c49-993e-bf1eff1d57f9" path="/var/lib/kubelet/pods/55652a9d-fd7e-4c49-993e-bf1eff1d57f9/volumes" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.444912 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-config\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.444974 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p42g\" (UniqueName: \"kubernetes.io/projected/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-kube-api-access-7p42g\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.445018 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-serving-cert\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.445250 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-client-ca\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.546704 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-client-ca\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.546795 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-config\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.546835 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p42g\" (UniqueName: \"kubernetes.io/projected/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-kube-api-access-7p42g\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.546877 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-serving-cert\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.547861 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-client-ca\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.548437 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-config\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.555322 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-serving-cert\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.571757 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p42g\" (UniqueName: \"kubernetes.io/projected/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-kube-api-access-7p42g\") pod \"route-controller-manager-8f777d457-9md6l\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.606684 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:07 crc kubenswrapper[4943]: I1129 06:39:07.848749 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l"] Nov 29 06:39:07 crc kubenswrapper[4943]: W1129 06:39:07.853638 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58ab1cda_a1c4_44f8_a4d2_3d39b89eaf7f.slice/crio-9e1ab08402b7e491b6f203dee570ebe8a568f20adc8a9633f3a954e9dea636d1 WatchSource:0}: Error finding container 9e1ab08402b7e491b6f203dee570ebe8a568f20adc8a9633f3a954e9dea636d1: Status 404 returned error can't find the container with id 9e1ab08402b7e491b6f203dee570ebe8a568f20adc8a9633f3a954e9dea636d1 Nov 29 06:39:08 crc kubenswrapper[4943]: I1129 06:39:08.127631 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" event={"ID":"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f","Type":"ContainerStarted","Data":"9e1ab08402b7e491b6f203dee570ebe8a568f20adc8a9633f3a954e9dea636d1"} Nov 29 06:39:08 crc kubenswrapper[4943]: I1129 06:39:08.131703 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" event={"ID":"36818827-2b96-4281-8f55-4a0122f5f3e2","Type":"ContainerStarted","Data":"f7df8c0694848d335031bfd24cbdb217652a495b3d3a63d7ac5eb2d901af0f25"} Nov 29 06:39:08 crc kubenswrapper[4943]: I1129 06:39:08.132084 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:08 crc kubenswrapper[4943]: I1129 06:39:08.136425 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:08 crc kubenswrapper[4943]: I1129 06:39:08.149152 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" podStartSLOduration=3.14913296 podStartE2EDuration="3.14913296s" podCreationTimestamp="2025-11-29 06:39:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:39:08.147070722 +0000 UTC m=+323.077159485" watchObservedRunningTime="2025-11-29 06:39:08.14913296 +0000 UTC m=+323.079221713" Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.140967 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" event={"ID":"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f","Type":"ContainerStarted","Data":"b72e02b3a2c07aa3d8d6096cb81749000c90fa4b5a1aaca9a03ad6e875452ed3"} Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.141351 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.143894 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.145847 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.145924 4943 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="88a5ea13f88f3b72fa3dac914e9ab3750bf84e697ea5684ee5ac5a52262980c0" exitCode=137 Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.145995 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"88a5ea13f88f3b72fa3dac914e9ab3750bf84e697ea5684ee5ac5a52262980c0"} Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.146034 4943 scope.go:117] "RemoveContainer" containerID="d2ff2a0ede3902dc9f686ff221d217d8160884b2b00e496edcb21ef5ee7e2b8a" Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.147784 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:09 crc kubenswrapper[4943]: I1129 06:39:09.162867 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" podStartSLOduration=4.162846438 podStartE2EDuration="4.162846438s" podCreationTimestamp="2025-11-29 06:39:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:39:09.162119951 +0000 UTC m=+324.092208704" watchObservedRunningTime="2025-11-29 06:39:09.162846438 +0000 UTC m=+324.092935211" Nov 29 06:39:10 crc kubenswrapper[4943]: I1129 06:39:10.155266 4943 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 29 06:39:10 crc kubenswrapper[4943]: I1129 06:39:10.156297 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"83c1336043ba0ace9f1c2dd07b99a272a2deb9d177f25436cff507fd44d73bc5"} Nov 29 06:39:12 crc kubenswrapper[4943]: I1129 06:39:12.852555 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:39:17 crc kubenswrapper[4943]: I1129 06:39:17.454773 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:39:17 crc kubenswrapper[4943]: I1129 06:39:17.459148 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:39:22 crc kubenswrapper[4943]: I1129 06:39:22.645939 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dt5vl"] Nov 29 06:39:22 crc kubenswrapper[4943]: I1129 06:39:22.647410 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dt5vl" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerName="registry-server" containerID="cri-o://ee3e0f918dc0d5d45a96090bc4e867c8b0e32dbdcd2ea5e41a33f2c04f0f2d35" gracePeriod=2 Nov 29 06:39:22 crc kubenswrapper[4943]: I1129 06:39:22.866785 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.251480 4943 generic.go:334] "Generic (PLEG): container finished" podID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerID="ee3e0f918dc0d5d45a96090bc4e867c8b0e32dbdcd2ea5e41a33f2c04f0f2d35" exitCode=0 Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.251585 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dt5vl" event={"ID":"c48a9e5e-033e-465e-a3e9-c474245e7b0d","Type":"ContainerDied","Data":"ee3e0f918dc0d5d45a96090bc4e867c8b0e32dbdcd2ea5e41a33f2c04f0f2d35"} Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.644651 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.783508 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r95nt\" (UniqueName: \"kubernetes.io/projected/c48a9e5e-033e-465e-a3e9-c474245e7b0d-kube-api-access-r95nt\") pod \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.783660 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-utilities\") pod \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.783717 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-catalog-content\") pod \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\" (UID: \"c48a9e5e-033e-465e-a3e9-c474245e7b0d\") " Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.785096 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-utilities" (OuterVolumeSpecName: "utilities") pod "c48a9e5e-033e-465e-a3e9-c474245e7b0d" (UID: "c48a9e5e-033e-465e-a3e9-c474245e7b0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.792261 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c48a9e5e-033e-465e-a3e9-c474245e7b0d-kube-api-access-r95nt" (OuterVolumeSpecName: "kube-api-access-r95nt") pod "c48a9e5e-033e-465e-a3e9-c474245e7b0d" (UID: "c48a9e5e-033e-465e-a3e9-c474245e7b0d"). InnerVolumeSpecName "kube-api-access-r95nt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.885804 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r95nt\" (UniqueName: \"kubernetes.io/projected/c48a9e5e-033e-465e-a3e9-c474245e7b0d-kube-api-access-r95nt\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.885884 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.912350 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c48a9e5e-033e-465e-a3e9-c474245e7b0d" (UID: "c48a9e5e-033e-465e-a3e9-c474245e7b0d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:39:23 crc kubenswrapper[4943]: I1129 06:39:23.987422 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c48a9e5e-033e-465e-a3e9-c474245e7b0d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:24 crc kubenswrapper[4943]: I1129 06:39:24.260746 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dt5vl" event={"ID":"c48a9e5e-033e-465e-a3e9-c474245e7b0d","Type":"ContainerDied","Data":"88c266bd6222f92c9d48f9cad2fba563d8dce978ced491dbb1a8051d2ca6bd0b"} Nov 29 06:39:24 crc kubenswrapper[4943]: I1129 06:39:24.260853 4943 scope.go:117] "RemoveContainer" containerID="ee3e0f918dc0d5d45a96090bc4e867c8b0e32dbdcd2ea5e41a33f2c04f0f2d35" Nov 29 06:39:24 crc kubenswrapper[4943]: I1129 06:39:24.260866 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dt5vl" Nov 29 06:39:24 crc kubenswrapper[4943]: I1129 06:39:24.283679 4943 scope.go:117] "RemoveContainer" containerID="410d3fd0899b7fa9f5c9379b881b10669cafcdee60defc2ea7f6d19a0285107d" Nov 29 06:39:24 crc kubenswrapper[4943]: I1129 06:39:24.300673 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dt5vl"] Nov 29 06:39:24 crc kubenswrapper[4943]: I1129 06:39:24.301710 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dt5vl"] Nov 29 06:39:24 crc kubenswrapper[4943]: I1129 06:39:24.327661 4943 scope.go:117] "RemoveContainer" containerID="64e7ebf323b635d729503ce57eeb24a04b2e5d0831112589a7a5d439f27c2b01" Nov 29 06:39:25 crc kubenswrapper[4943]: I1129 06:39:25.334843 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" path="/var/lib/kubelet/pods/c48a9e5e-033e-465e-a3e9-c474245e7b0d/volumes" Nov 29 06:39:28 crc kubenswrapper[4943]: I1129 06:39:28.778114 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc"] Nov 29 06:39:28 crc kubenswrapper[4943]: I1129 06:39:28.779225 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" podUID="36818827-2b96-4281-8f55-4a0122f5f3e2" containerName="controller-manager" containerID="cri-o://f7df8c0694848d335031bfd24cbdb217652a495b3d3a63d7ac5eb2d901af0f25" gracePeriod=30 Nov 29 06:39:28 crc kubenswrapper[4943]: I1129 06:39:28.792628 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l"] Nov 29 06:39:28 crc kubenswrapper[4943]: I1129 06:39:28.792983 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" podUID="58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" containerName="route-controller-manager" containerID="cri-o://b72e02b3a2c07aa3d8d6096cb81749000c90fa4b5a1aaca9a03ad6e875452ed3" gracePeriod=30 Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.297832 4943 generic.go:334] "Generic (PLEG): container finished" podID="58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" containerID="b72e02b3a2c07aa3d8d6096cb81749000c90fa4b5a1aaca9a03ad6e875452ed3" exitCode=0 Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.298131 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" event={"ID":"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f","Type":"ContainerDied","Data":"b72e02b3a2c07aa3d8d6096cb81749000c90fa4b5a1aaca9a03ad6e875452ed3"} Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.305160 4943 generic.go:334] "Generic (PLEG): container finished" podID="36818827-2b96-4281-8f55-4a0122f5f3e2" containerID="f7df8c0694848d335031bfd24cbdb217652a495b3d3a63d7ac5eb2d901af0f25" exitCode=0 Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.305232 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" event={"ID":"36818827-2b96-4281-8f55-4a0122f5f3e2","Type":"ContainerDied","Data":"f7df8c0694848d335031bfd24cbdb217652a495b3d3a63d7ac5eb2d901af0f25"} Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.401121 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.494379 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.570195 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-client-ca\") pod \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.570384 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-serving-cert\") pod \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.570453 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-config\") pod \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.570551 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7p42g\" (UniqueName: \"kubernetes.io/projected/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-kube-api-access-7p42g\") pod \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\" (UID: \"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.571644 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-client-ca" (OuterVolumeSpecName: "client-ca") pod "58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" (UID: "58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.571658 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-config" (OuterVolumeSpecName: "config") pod "58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" (UID: "58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.578329 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" (UID: "58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.578971 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-kube-api-access-7p42g" (OuterVolumeSpecName: "kube-api-access-7p42g") pod "58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" (UID: "58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f"). InnerVolumeSpecName "kube-api-access-7p42g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.672548 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-proxy-ca-bundles\") pod \"36818827-2b96-4281-8f55-4a0122f5f3e2\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.672812 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dphx\" (UniqueName: \"kubernetes.io/projected/36818827-2b96-4281-8f55-4a0122f5f3e2-kube-api-access-5dphx\") pod \"36818827-2b96-4281-8f55-4a0122f5f3e2\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.672848 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-client-ca\") pod \"36818827-2b96-4281-8f55-4a0122f5f3e2\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.672915 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36818827-2b96-4281-8f55-4a0122f5f3e2-serving-cert\") pod \"36818827-2b96-4281-8f55-4a0122f5f3e2\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.672954 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-config\") pod \"36818827-2b96-4281-8f55-4a0122f5f3e2\" (UID: \"36818827-2b96-4281-8f55-4a0122f5f3e2\") " Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.673305 4943 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.673330 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.673344 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.673362 4943 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7p42g\" (UniqueName: \"kubernetes.io/projected/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f-kube-api-access-7p42g\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.674135 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "36818827-2b96-4281-8f55-4a0122f5f3e2" (UID: "36818827-2b96-4281-8f55-4a0122f5f3e2"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.674188 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-client-ca" (OuterVolumeSpecName: "client-ca") pod "36818827-2b96-4281-8f55-4a0122f5f3e2" (UID: "36818827-2b96-4281-8f55-4a0122f5f3e2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.674385 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-config" (OuterVolumeSpecName: "config") pod "36818827-2b96-4281-8f55-4a0122f5f3e2" (UID: "36818827-2b96-4281-8f55-4a0122f5f3e2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.677130 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36818827-2b96-4281-8f55-4a0122f5f3e2-kube-api-access-5dphx" (OuterVolumeSpecName: "kube-api-access-5dphx") pod "36818827-2b96-4281-8f55-4a0122f5f3e2" (UID: "36818827-2b96-4281-8f55-4a0122f5f3e2"). InnerVolumeSpecName "kube-api-access-5dphx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.677883 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36818827-2b96-4281-8f55-4a0122f5f3e2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "36818827-2b96-4281-8f55-4a0122f5f3e2" (UID: "36818827-2b96-4281-8f55-4a0122f5f3e2"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.774987 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36818827-2b96-4281-8f55-4a0122f5f3e2-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.775028 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.775038 4943 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.775050 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dphx\" (UniqueName: \"kubernetes.io/projected/36818827-2b96-4281-8f55-4a0122f5f3e2-kube-api-access-5dphx\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:29 crc kubenswrapper[4943]: I1129 06:39:29.775059 4943 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36818827-2b96-4281-8f55-4a0122f5f3e2-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.305693 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-597c79bdbb-fzjg4"] Nov 29 06:39:30 crc kubenswrapper[4943]: E1129 06:39:30.306068 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerName="extract-content" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.306093 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerName="extract-content" Nov 29 06:39:30 crc kubenswrapper[4943]: E1129 06:39:30.306109 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerName="extract-utilities" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.306119 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerName="extract-utilities" Nov 29 06:39:30 crc kubenswrapper[4943]: E1129 06:39:30.306146 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerName="registry-server" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.306154 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerName="registry-server" Nov 29 06:39:30 crc kubenswrapper[4943]: E1129 06:39:30.306168 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36818827-2b96-4281-8f55-4a0122f5f3e2" containerName="controller-manager" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.306177 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="36818827-2b96-4281-8f55-4a0122f5f3e2" containerName="controller-manager" Nov 29 06:39:30 crc kubenswrapper[4943]: E1129 06:39:30.306192 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" containerName="route-controller-manager" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.306221 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" 
containerName="route-controller-manager" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.306353 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="c48a9e5e-033e-465e-a3e9-c474245e7b0d" containerName="registry-server" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.306376 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="36818827-2b96-4281-8f55-4a0122f5f3e2" containerName="controller-manager" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.306386 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" containerName="route-controller-manager" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.307001 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.310735 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j"] Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.311590 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.330301 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.330286 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc" event={"ID":"36818827-2b96-4281-8f55-4a0122f5f3e2","Type":"ContainerDied","Data":"88bfe1fe45f1d2e77d761f9eead50575ce1f04d6ea21307f2ae2272836f6379f"} Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.333180 4943 scope.go:117] "RemoveContainer" containerID="f7df8c0694848d335031bfd24cbdb217652a495b3d3a63d7ac5eb2d901af0f25" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.334174 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-597c79bdbb-fzjg4"] Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.361444 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" event={"ID":"58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f","Type":"ContainerDied","Data":"9e1ab08402b7e491b6f203dee570ebe8a568f20adc8a9633f3a954e9dea636d1"} Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.361548 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.368153 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j"] Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.369162 4943 scope.go:117] "RemoveContainer" containerID="b72e02b3a2c07aa3d8d6096cb81749000c90fa4b5a1aaca9a03ad6e875452ed3" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.383708 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-config\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.394825 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc"] Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.398808 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-67fd5fbfc5-5sgsc"] Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.417135 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l"] Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.421283 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8f777d457-9md6l"] Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.484881 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-serving-cert\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.485191 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-proxy-ca-bundles\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.485282 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-serving-cert\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.485376 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-client-ca\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.485469 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htqs6\" (UniqueName: \"kubernetes.io/projected/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-kube-api-access-htqs6\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.485731 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-config\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.485820 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bb6m\" (UniqueName: \"kubernetes.io/projected/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-kube-api-access-2bb6m\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.485910 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-config\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.485984 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-client-ca\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.487356 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-config\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.587830 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-proxy-ca-bundles\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.587888 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-serving-cert\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.587942 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-client-ca\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.587981 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htqs6\" (UniqueName: \"kubernetes.io/projected/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-kube-api-access-htqs6\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.588047 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-config\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.588076 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bb6m\" (UniqueName: \"kubernetes.io/projected/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-kube-api-access-2bb6m\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.588102 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-client-ca\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.588123 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-serving-cert\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.589060 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-proxy-ca-bundles\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.589677 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-client-ca\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.589781 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-config\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: 
\"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.589843 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-client-ca\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.593410 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-serving-cert\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.600008 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-serving-cert\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.608985 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htqs6\" (UniqueName: \"kubernetes.io/projected/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-kube-api-access-htqs6\") pod \"controller-manager-597c79bdbb-fzjg4\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.610258 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bb6m\" (UniqueName: \"kubernetes.io/projected/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-kube-api-access-2bb6m\") pod \"route-controller-manager-84f5659d8d-jtx5j\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.662869 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.666283 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.892998 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-597c79bdbb-fzjg4"] Nov 29 06:39:30 crc kubenswrapper[4943]: W1129 06:39:30.906220 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c84a02e_8f2c_4e63_9b5b_4fd2789f234f.slice/crio-8122d1e3532b59640a9b39abf8470cb56a296dd223bb0f9fbf9022691c8f36e7 WatchSource:0}: Error finding container 8122d1e3532b59640a9b39abf8470cb56a296dd223bb0f9fbf9022691c8f36e7: Status 404 returned error can't find the container with id 8122d1e3532b59640a9b39abf8470cb56a296dd223bb0f9fbf9022691c8f36e7 Nov 29 06:39:30 crc kubenswrapper[4943]: I1129 06:39:30.963497 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j"] Nov 29 06:39:30 crc kubenswrapper[4943]: W1129 06:39:30.982501 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9c33fcc_6a37_4a64_b0e7_903ff158f02e.slice/crio-4eefa8b984a1654c1f78dc537754810414b49c52e6f62112a49509346ef2004f WatchSource:0}: Error finding container 4eefa8b984a1654c1f78dc537754810414b49c52e6f62112a49509346ef2004f: Status 404 returned error can't find the container with id 4eefa8b984a1654c1f78dc537754810414b49c52e6f62112a49509346ef2004f Nov 29 06:39:31 crc kubenswrapper[4943]: I1129 06:39:31.334747 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36818827-2b96-4281-8f55-4a0122f5f3e2" path="/var/lib/kubelet/pods/36818827-2b96-4281-8f55-4a0122f5f3e2/volumes" Nov 29 06:39:31 crc kubenswrapper[4943]: I1129 06:39:31.335834 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f" path="/var/lib/kubelet/pods/58ab1cda-a1c4-44f8-a4d2-3d39b89eaf7f/volumes" Nov 29 06:39:31 crc kubenswrapper[4943]: I1129 06:39:31.374627 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" event={"ID":"c9c33fcc-6a37-4a64-b0e7-903ff158f02e","Type":"ContainerStarted","Data":"4eefa8b984a1654c1f78dc537754810414b49c52e6f62112a49509346ef2004f"} Nov 29 06:39:31 crc kubenswrapper[4943]: I1129 06:39:31.376549 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" event={"ID":"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f","Type":"ContainerStarted","Data":"8122d1e3532b59640a9b39abf8470cb56a296dd223bb0f9fbf9022691c8f36e7"} Nov 29 06:39:32 crc kubenswrapper[4943]: I1129 06:39:32.382757 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" event={"ID":"c9c33fcc-6a37-4a64-b0e7-903ff158f02e","Type":"ContainerStarted","Data":"1a26a9fc6870dfd0018a3b9e68b59691207c91a73840e0fb805ccda74a112e63"} Nov 29 06:39:32 crc kubenswrapper[4943]: I1129 06:39:32.382820 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:32 crc kubenswrapper[4943]: I1129 06:39:32.385543 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" 
event={"ID":"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f","Type":"ContainerStarted","Data":"8c81bbb1f60452f6f8ccbaeded4f7dc91a910b9b0c83c3d02dd90f4cbfd70f14"} Nov 29 06:39:32 crc kubenswrapper[4943]: I1129 06:39:32.385916 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:32 crc kubenswrapper[4943]: I1129 06:39:32.390176 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:39:32 crc kubenswrapper[4943]: I1129 06:39:32.390906 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:39:32 crc kubenswrapper[4943]: I1129 06:39:32.404029 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" podStartSLOduration=4.404006768 podStartE2EDuration="4.404006768s" podCreationTimestamp="2025-11-29 06:39:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:39:32.400974355 +0000 UTC m=+347.331063128" watchObservedRunningTime="2025-11-29 06:39:32.404006768 +0000 UTC m=+347.334095521" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.415364 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" podStartSLOduration=32.415348845 podStartE2EDuration="32.415348845s" podCreationTimestamp="2025-11-29 06:39:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:39:32.453816106 +0000 UTC m=+347.383904869" watchObservedRunningTime="2025-11-29 06:40:00.415348845 +0000 UTC m=+375.345437598" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.416405 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qjtkl"] Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.419825 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.446479 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qjtkl"] Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.594257 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-trusted-ca\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.594330 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlcqx\" (UniqueName: \"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-kube-api-access-rlcqx\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.594407 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.594457 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.594483 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.594515 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-registry-certificates\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.594543 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-registry-tls\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.594622 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-bound-sa-token\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.623191 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.695655 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.695736 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.695770 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-registry-certificates\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.695797 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-registry-tls\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.695834 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-bound-sa-token\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.695884 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-trusted-ca\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.695912 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlcqx\" (UniqueName: \"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-kube-api-access-rlcqx\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.696231 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.697204 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-registry-certificates\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.697378 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-trusted-ca\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.702032 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.702157 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-registry-tls\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.713266 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlcqx\" (UniqueName: \"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-kube-api-access-rlcqx\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.723830 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ac8cee73-4b8f-4cbb-a765-78ef2d5e2540-bound-sa-token\") pod \"image-registry-66df7c8f76-qjtkl\" (UID: \"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:00 crc kubenswrapper[4943]: I1129 06:40:00.746813 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:01 crc kubenswrapper[4943]: I1129 06:40:01.206280 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qjtkl"] Nov 29 06:40:01 crc kubenswrapper[4943]: I1129 06:40:01.562772 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" event={"ID":"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540","Type":"ContainerStarted","Data":"26b8ba00a16eddc5354c37a1604a3368fe3c347ec8b0be8ff83d66f0bd0fc666"} Nov 29 06:40:02 crc kubenswrapper[4943]: I1129 06:40:02.614807 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 06:40:02 crc kubenswrapper[4943]: I1129 06:40:02.615287 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 06:40:03 crc kubenswrapper[4943]: I1129 06:40:03.577224 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" event={"ID":"ac8cee73-4b8f-4cbb-a765-78ef2d5e2540","Type":"ContainerStarted","Data":"6094f400a4992aaf59f5f2f96caccdf1c10d8851fc1ef5622b1eea0299ed8865"} Nov 29 06:40:03 crc kubenswrapper[4943]: I1129 06:40:03.577342 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:03 crc kubenswrapper[4943]: I1129 06:40:03.616444 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" podStartSLOduration=3.616421221 podStartE2EDuration="3.616421221s" podCreationTimestamp="2025-11-29 06:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:40:03.597504513 +0000 UTC m=+378.527593266" watchObservedRunningTime="2025-11-29 06:40:03.616421221 +0000 UTC m=+378.546509984" Nov 29 06:40:05 crc kubenswrapper[4943]: I1129 06:40:05.182720 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j"] Nov 29 06:40:05 crc kubenswrapper[4943]: I1129 06:40:05.182966 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" podUID="c9c33fcc-6a37-4a64-b0e7-903ff158f02e" containerName="route-controller-manager" containerID="cri-o://1a26a9fc6870dfd0018a3b9e68b59691207c91a73840e0fb805ccda74a112e63" gracePeriod=30 Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.594721 4943 generic.go:334] "Generic (PLEG): container finished" podID="c9c33fcc-6a37-4a64-b0e7-903ff158f02e" containerID="1a26a9fc6870dfd0018a3b9e68b59691207c91a73840e0fb805ccda74a112e63" exitCode=0 Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.594825 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" 
event={"ID":"c9c33fcc-6a37-4a64-b0e7-903ff158f02e","Type":"ContainerDied","Data":"1a26a9fc6870dfd0018a3b9e68b59691207c91a73840e0fb805ccda74a112e63"} Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.705186 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.881158 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-client-ca\") pod \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.881241 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-config\") pod \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.881285 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bb6m\" (UniqueName: \"kubernetes.io/projected/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-kube-api-access-2bb6m\") pod \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.881313 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-serving-cert\") pod \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\" (UID: \"c9c33fcc-6a37-4a64-b0e7-903ff158f02e\") " Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.882061 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-client-ca" (OuterVolumeSpecName: "client-ca") pod "c9c33fcc-6a37-4a64-b0e7-903ff158f02e" (UID: "c9c33fcc-6a37-4a64-b0e7-903ff158f02e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.882106 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-config" (OuterVolumeSpecName: "config") pod "c9c33fcc-6a37-4a64-b0e7-903ff158f02e" (UID: "c9c33fcc-6a37-4a64-b0e7-903ff158f02e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.886336 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-kube-api-access-2bb6m" (OuterVolumeSpecName: "kube-api-access-2bb6m") pod "c9c33fcc-6a37-4a64-b0e7-903ff158f02e" (UID: "c9c33fcc-6a37-4a64-b0e7-903ff158f02e"). InnerVolumeSpecName "kube-api-access-2bb6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.886606 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c9c33fcc-6a37-4a64-b0e7-903ff158f02e" (UID: "c9c33fcc-6a37-4a64-b0e7-903ff158f02e"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.983441 4943 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.983510 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.983525 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bb6m\" (UniqueName: \"kubernetes.io/projected/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-kube-api-access-2bb6m\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:06 crc kubenswrapper[4943]: I1129 06:40:06.983539 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9c33fcc-6a37-4a64-b0e7-903ff158f02e-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:07 crc kubenswrapper[4943]: I1129 06:40:07.602100 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" event={"ID":"c9c33fcc-6a37-4a64-b0e7-903ff158f02e","Type":"ContainerDied","Data":"4eefa8b984a1654c1f78dc537754810414b49c52e6f62112a49509346ef2004f"} Nov 29 06:40:07 crc kubenswrapper[4943]: I1129 06:40:07.602421 4943 scope.go:117] "RemoveContainer" containerID="1a26a9fc6870dfd0018a3b9e68b59691207c91a73840e0fb805ccda74a112e63" Nov 29 06:40:07 crc kubenswrapper[4943]: I1129 06:40:07.602591 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j" Nov 29 06:40:07 crc kubenswrapper[4943]: I1129 06:40:07.621670 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j"] Nov 29 06:40:07 crc kubenswrapper[4943]: I1129 06:40:07.626866 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f5659d8d-jtx5j"] Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.330472 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl"] Nov 29 06:40:08 crc kubenswrapper[4943]: E1129 06:40:08.330729 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9c33fcc-6a37-4a64-b0e7-903ff158f02e" containerName="route-controller-manager" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.330741 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9c33fcc-6a37-4a64-b0e7-903ff158f02e" containerName="route-controller-manager" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.330840 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9c33fcc-6a37-4a64-b0e7-903ff158f02e" containerName="route-controller-manager" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.331296 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.334029 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.334033 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.339040 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.339233 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.339382 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.339747 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.343066 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl"] Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.504017 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c15212b0-d347-4dfa-9751-b6bc15a5b377-config\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.504096 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c15212b0-d347-4dfa-9751-b6bc15a5b377-client-ca\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.504116 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w268\" (UniqueName: \"kubernetes.io/projected/c15212b0-d347-4dfa-9751-b6bc15a5b377-kube-api-access-7w268\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.504137 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c15212b0-d347-4dfa-9751-b6bc15a5b377-serving-cert\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.605073 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c15212b0-d347-4dfa-9751-b6bc15a5b377-config\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: 
\"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.605166 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c15212b0-d347-4dfa-9751-b6bc15a5b377-client-ca\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.605191 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7w268\" (UniqueName: \"kubernetes.io/projected/c15212b0-d347-4dfa-9751-b6bc15a5b377-kube-api-access-7w268\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.605220 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c15212b0-d347-4dfa-9751-b6bc15a5b377-serving-cert\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.606836 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c15212b0-d347-4dfa-9751-b6bc15a5b377-client-ca\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.607083 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c15212b0-d347-4dfa-9751-b6bc15a5b377-config\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.617465 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c15212b0-d347-4dfa-9751-b6bc15a5b377-serving-cert\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.621586 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w268\" (UniqueName: \"kubernetes.io/projected/c15212b0-d347-4dfa-9751-b6bc15a5b377-kube-api-access-7w268\") pod \"route-controller-manager-669b76479-bmmxl\" (UID: \"c15212b0-d347-4dfa-9751-b6bc15a5b377\") " pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:08 crc kubenswrapper[4943]: I1129 06:40:08.651145 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:09 crc kubenswrapper[4943]: I1129 06:40:09.044077 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl"] Nov 29 06:40:09 crc kubenswrapper[4943]: I1129 06:40:09.336945 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9c33fcc-6a37-4a64-b0e7-903ff158f02e" path="/var/lib/kubelet/pods/c9c33fcc-6a37-4a64-b0e7-903ff158f02e/volumes" Nov 29 06:40:09 crc kubenswrapper[4943]: I1129 06:40:09.617272 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" event={"ID":"c15212b0-d347-4dfa-9751-b6bc15a5b377","Type":"ContainerStarted","Data":"d2cf50cfdbf2b8fbe483ba4d033ae3e25b63737dd157d2637315d62bb15d813d"} Nov 29 06:40:09 crc kubenswrapper[4943]: I1129 06:40:09.617325 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" event={"ID":"c15212b0-d347-4dfa-9751-b6bc15a5b377","Type":"ContainerStarted","Data":"1de745bdefd5b123c27645cb73a2fcb3816f9027dbca3c01769d305f3c4db54b"} Nov 29 06:40:10 crc kubenswrapper[4943]: I1129 06:40:10.623178 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:10 crc kubenswrapper[4943]: I1129 06:40:10.628803 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" Nov 29 06:40:10 crc kubenswrapper[4943]: I1129 06:40:10.640903 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" podStartSLOduration=5.640890339 podStartE2EDuration="5.640890339s" podCreationTimestamp="2025-11-29 06:40:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:40:10.640339056 +0000 UTC m=+385.570427819" watchObservedRunningTime="2025-11-29 06:40:10.640890339 +0000 UTC m=+385.570979092" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.208376 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sr58d"] Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.209376 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sr58d" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="registry-server" containerID="cri-o://46084ff836c7e5f257703b6a90ec08b858d505366f7ec5e5314875baa7df20e7" gracePeriod=30 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.221442 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vxdd2"] Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.222793 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vxdd2" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerName="registry-server" containerID="cri-o://596851d12eac218e8fb111cab4c4ba32b77f71bc838b77b2f6237852fdb87ce8" gracePeriod=30 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.230739 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/marketplace-operator-79b997595-gxzdk"] Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.231001 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" podUID="b143e5c3-54ec-40d9-9c11-690cf321df9f" containerName="marketplace-operator" containerID="cri-o://051624975b58b473f64ada6ab33192bb4aabaadb34e364f5d979362e71f5190b" gracePeriod=30 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.246530 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rz7x"] Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.246903 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6rz7x" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerName="registry-server" containerID="cri-o://c0d7bf1ab44dd2e2a75be9dbf28e9eb2a0aec7d054f7f0a2ee8584d17cfb6778" gracePeriod=30 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.261522 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p7m24"] Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.262419 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p7m24" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="registry-server" containerID="cri-o://ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0" gracePeriod=30 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.263948 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mzndv"] Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.264879 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.275673 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mzndv"] Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.395188 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/688a93bc-3061-40cb-a122-02b679922465-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.395269 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrw7f\" (UniqueName: \"kubernetes.io/projected/688a93bc-3061-40cb-a122-02b679922465-kube-api-access-lrw7f\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.395326 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/688a93bc-3061-40cb-a122-02b679922465-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.499126 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/688a93bc-3061-40cb-a122-02b679922465-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.499260 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/688a93bc-3061-40cb-a122-02b679922465-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.499368 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrw7f\" (UniqueName: \"kubernetes.io/projected/688a93bc-3061-40cb-a122-02b679922465-kube-api-access-lrw7f\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.500711 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/688a93bc-3061-40cb-a122-02b679922465-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.507115 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/688a93bc-3061-40cb-a122-02b679922465-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.520365 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrw7f\" (UniqueName: \"kubernetes.io/projected/688a93bc-3061-40cb-a122-02b679922465-kube-api-access-lrw7f\") pod \"marketplace-operator-79b997595-mzndv\" (UID: \"688a93bc-3061-40cb-a122-02b679922465\") " pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.594519 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.656976 4943 generic.go:334] "Generic (PLEG): container finished" podID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerID="596851d12eac218e8fb111cab4c4ba32b77f71bc838b77b2f6237852fdb87ce8" exitCode=0 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.657075 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdd2" event={"ID":"04357fd3-79d7-4a5c-b4ba-01e1ff2face4","Type":"ContainerDied","Data":"596851d12eac218e8fb111cab4c4ba32b77f71bc838b77b2f6237852fdb87ce8"} Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.664085 4943 generic.go:334] "Generic (PLEG): container finished" podID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerID="c0d7bf1ab44dd2e2a75be9dbf28e9eb2a0aec7d054f7f0a2ee8584d17cfb6778" exitCode=0 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.664174 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rz7x" event={"ID":"3b60b6a4-d67c-4450-b5f3-58d3124be789","Type":"ContainerDied","Data":"c0d7bf1ab44dd2e2a75be9dbf28e9eb2a0aec7d054f7f0a2ee8584d17cfb6778"} Nov 29 06:40:14 crc kubenswrapper[4943]: E1129 06:40:14.683592 4943 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0 is running failed: container process not found" containerID="ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.684667 4943 generic.go:334] "Generic (PLEG): container finished" podID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerID="46084ff836c7e5f257703b6a90ec08b858d505366f7ec5e5314875baa7df20e7" exitCode=0 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.684745 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sr58d" event={"ID":"8911d577-ec61-4e6d-96e1-c51ee6b5477e","Type":"ContainerDied","Data":"46084ff836c7e5f257703b6a90ec08b858d505366f7ec5e5314875baa7df20e7"} Nov 29 06:40:14 crc kubenswrapper[4943]: E1129 06:40:14.684784 4943 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0 is running failed: container process not found" containerID="ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 06:40:14 crc kubenswrapper[4943]: 
E1129 06:40:14.685334 4943 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0 is running failed: container process not found" containerID="ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 06:40:14 crc kubenswrapper[4943]: E1129 06:40:14.685373 4943 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-p7m24" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="registry-server" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.697322 4943 generic.go:334] "Generic (PLEG): container finished" podID="b143e5c3-54ec-40d9-9c11-690cf321df9f" containerID="051624975b58b473f64ada6ab33192bb4aabaadb34e364f5d979362e71f5190b" exitCode=0 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.697389 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" event={"ID":"b143e5c3-54ec-40d9-9c11-690cf321df9f","Type":"ContainerDied","Data":"051624975b58b473f64ada6ab33192bb4aabaadb34e364f5d979362e71f5190b"} Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.700880 4943 generic.go:334] "Generic (PLEG): container finished" podID="41b233a1-b997-493b-a71c-e4b9bd816479" containerID="ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0" exitCode=0 Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.700913 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7m24" event={"ID":"41b233a1-b997-493b-a71c-e4b9bd816479","Type":"ContainerDied","Data":"ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0"} Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.788349 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.920947 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.925168 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-catalog-content\") pod \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.925259 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-utilities\") pod \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.925404 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frcnn\" (UniqueName: \"kubernetes.io/projected/8911d577-ec61-4e6d-96e1-c51ee6b5477e-kube-api-access-frcnn\") pod \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\" (UID: \"8911d577-ec61-4e6d-96e1-c51ee6b5477e\") " Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.926781 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-utilities" (OuterVolumeSpecName: "utilities") pod "8911d577-ec61-4e6d-96e1-c51ee6b5477e" (UID: "8911d577-ec61-4e6d-96e1-c51ee6b5477e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.935441 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.935917 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8911d577-ec61-4e6d-96e1-c51ee6b5477e-kube-api-access-frcnn" (OuterVolumeSpecName: "kube-api-access-frcnn") pod "8911d577-ec61-4e6d-96e1-c51ee6b5477e" (UID: "8911d577-ec61-4e6d-96e1-c51ee6b5477e"). InnerVolumeSpecName "kube-api-access-frcnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.938446 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frcnn\" (UniqueName: \"kubernetes.io/projected/8911d577-ec61-4e6d-96e1-c51ee6b5477e-kube-api-access-frcnn\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.938507 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.956950 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:40:14 crc kubenswrapper[4943]: I1129 06:40:14.960204 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.034602 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8911d577-ec61-4e6d-96e1-c51ee6b5477e" (UID: "8911d577-ec61-4e6d-96e1-c51ee6b5477e"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.039608 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-utilities\") pod \"3b60b6a4-d67c-4450-b5f3-58d3124be789\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.039698 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-utilities\") pod \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.039742 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-catalog-content\") pod \"3b60b6a4-d67c-4450-b5f3-58d3124be789\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.039764 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s4nm\" (UniqueName: \"kubernetes.io/projected/41b233a1-b997-493b-a71c-e4b9bd816479-kube-api-access-5s4nm\") pod \"41b233a1-b997-493b-a71c-e4b9bd816479\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.039801 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-utilities\") pod \"41b233a1-b997-493b-a71c-e4b9bd816479\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.039844 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-trusted-ca\") pod \"b143e5c3-54ec-40d9-9c11-690cf321df9f\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.039915 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-catalog-content\") pod \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.039995 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-operator-metrics\") pod \"b143e5c3-54ec-40d9-9c11-690cf321df9f\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.040018 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-catalog-content\") pod \"41b233a1-b997-493b-a71c-e4b9bd816479\" (UID: \"41b233a1-b997-493b-a71c-e4b9bd816479\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.040061 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d656k\" (UniqueName: 
\"kubernetes.io/projected/3b60b6a4-d67c-4450-b5f3-58d3124be789-kube-api-access-d656k\") pod \"3b60b6a4-d67c-4450-b5f3-58d3124be789\" (UID: \"3b60b6a4-d67c-4450-b5f3-58d3124be789\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.040099 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rk2p\" (UniqueName: \"kubernetes.io/projected/b143e5c3-54ec-40d9-9c11-690cf321df9f-kube-api-access-4rk2p\") pod \"b143e5c3-54ec-40d9-9c11-690cf321df9f\" (UID: \"b143e5c3-54ec-40d9-9c11-690cf321df9f\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.040134 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bt9f5\" (UniqueName: \"kubernetes.io/projected/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-kube-api-access-bt9f5\") pod \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\" (UID: \"04357fd3-79d7-4a5c-b4ba-01e1ff2face4\") " Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.040390 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8911d577-ec61-4e6d-96e1-c51ee6b5477e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.041128 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b143e5c3-54ec-40d9-9c11-690cf321df9f" (UID: "b143e5c3-54ec-40d9-9c11-690cf321df9f"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.041680 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-utilities" (OuterVolumeSpecName: "utilities") pod "04357fd3-79d7-4a5c-b4ba-01e1ff2face4" (UID: "04357fd3-79d7-4a5c-b4ba-01e1ff2face4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.042009 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-utilities" (OuterVolumeSpecName: "utilities") pod "3b60b6a4-d67c-4450-b5f3-58d3124be789" (UID: "3b60b6a4-d67c-4450-b5f3-58d3124be789"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.047759 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b60b6a4-d67c-4450-b5f3-58d3124be789-kube-api-access-d656k" (OuterVolumeSpecName: "kube-api-access-d656k") pod "3b60b6a4-d67c-4450-b5f3-58d3124be789" (UID: "3b60b6a4-d67c-4450-b5f3-58d3124be789"). InnerVolumeSpecName "kube-api-access-d656k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.051102 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b143e5c3-54ec-40d9-9c11-690cf321df9f-kube-api-access-4rk2p" (OuterVolumeSpecName: "kube-api-access-4rk2p") pod "b143e5c3-54ec-40d9-9c11-690cf321df9f" (UID: "b143e5c3-54ec-40d9-9c11-690cf321df9f"). InnerVolumeSpecName "kube-api-access-4rk2p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.051043 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-utilities" (OuterVolumeSpecName: "utilities") pod "41b233a1-b997-493b-a71c-e4b9bd816479" (UID: "41b233a1-b997-493b-a71c-e4b9bd816479"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.054198 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41b233a1-b997-493b-a71c-e4b9bd816479-kube-api-access-5s4nm" (OuterVolumeSpecName: "kube-api-access-5s4nm") pod "41b233a1-b997-493b-a71c-e4b9bd816479" (UID: "41b233a1-b997-493b-a71c-e4b9bd816479"). InnerVolumeSpecName "kube-api-access-5s4nm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.054331 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b143e5c3-54ec-40d9-9c11-690cf321df9f" (UID: "b143e5c3-54ec-40d9-9c11-690cf321df9f"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.057346 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-kube-api-access-bt9f5" (OuterVolumeSpecName: "kube-api-access-bt9f5") pod "04357fd3-79d7-4a5c-b4ba-01e1ff2face4" (UID: "04357fd3-79d7-4a5c-b4ba-01e1ff2face4"). InnerVolumeSpecName "kube-api-access-bt9f5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.063927 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3b60b6a4-d67c-4450-b5f3-58d3124be789" (UID: "3b60b6a4-d67c-4450-b5f3-58d3124be789"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.099251 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "04357fd3-79d7-4a5c-b4ba-01e1ff2face4" (UID: "04357fd3-79d7-4a5c-b4ba-01e1ff2face4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.131500 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mzndv"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.141909 4943 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.141941 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d656k\" (UniqueName: \"kubernetes.io/projected/3b60b6a4-d67c-4450-b5f3-58d3124be789-kube-api-access-d656k\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.141950 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rk2p\" (UniqueName: \"kubernetes.io/projected/b143e5c3-54ec-40d9-9c11-690cf321df9f-kube-api-access-4rk2p\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.141959 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bt9f5\" (UniqueName: \"kubernetes.io/projected/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-kube-api-access-bt9f5\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.141989 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.141999 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.142007 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s4nm\" (UniqueName: \"kubernetes.io/projected/41b233a1-b997-493b-a71c-e4b9bd816479-kube-api-access-5s4nm\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.142015 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b60b6a4-d67c-4450-b5f3-58d3124be789-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.142023 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.142031 4943 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b143e5c3-54ec-40d9-9c11-690cf321df9f-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.142058 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04357fd3-79d7-4a5c-b4ba-01e1ff2face4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.158279 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-catalog-content" (OuterVolumeSpecName: "catalog-content") pod 
"41b233a1-b997-493b-a71c-e4b9bd816479" (UID: "41b233a1-b997-493b-a71c-e4b9bd816479"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.243534 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41b233a1-b997-493b-a71c-e4b9bd816479-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.707131 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rz7x" event={"ID":"3b60b6a4-d67c-4450-b5f3-58d3124be789","Type":"ContainerDied","Data":"6e9834ed8098217de9ecb1f5464249c9479fe217b85afd6d0ccf377df920d828"} Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.707205 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rz7x" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.707524 4943 scope.go:117] "RemoveContainer" containerID="c0d7bf1ab44dd2e2a75be9dbf28e9eb2a0aec7d054f7f0a2ee8584d17cfb6778" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.707882 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" event={"ID":"688a93bc-3061-40cb-a122-02b679922465","Type":"ContainerStarted","Data":"72f80cfa5e70ab956def3016d213e9c08b28f36cb9e9d9c82f221288801bff88"} Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.710274 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sr58d" event={"ID":"8911d577-ec61-4e6d-96e1-c51ee6b5477e","Type":"ContainerDied","Data":"54d03b8db7ba57d012b0b4bda2083c5a17380c358a7a60dca6eb9e2ee213c503"} Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.710308 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sr58d" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.715817 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.716585 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxzdk" event={"ID":"b143e5c3-54ec-40d9-9c11-690cf321df9f","Type":"ContainerDied","Data":"434f0c961426d5b612f7f9f09e46a49502f7526b210628897f8bd2df3c085e1e"} Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.719264 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7m24" event={"ID":"41b233a1-b997-493b-a71c-e4b9bd816479","Type":"ContainerDied","Data":"edec8a592fc73daa7eb1dacc550d1174546b0166db82f3fe054621920330b926"} Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.719384 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7m24" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.721176 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdd2" event={"ID":"04357fd3-79d7-4a5c-b4ba-01e1ff2face4","Type":"ContainerDied","Data":"e88c543c8c06285086c75b14e5b8d08e3cbe597695c47e2f871024ca39d865cb"} Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.721336 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vxdd2" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.728381 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rz7x"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.730787 4943 scope.go:117] "RemoveContainer" containerID="c3e2becd245b7f44a990b46cc9b917ac1c3c7d4b59ff8e6cc1d09f5609a60d4c" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.731143 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rz7x"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.742727 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sr58d"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.746932 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sr58d"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.755608 4943 scope.go:117] "RemoveContainer" containerID="0788f66a0780680bbe47bd10555adfb0158c79d3afad512631ba4ce4b3346bb1" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.757556 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxzdk"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.761897 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxzdk"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.775608 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vxdd2"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.778641 4943 scope.go:117] "RemoveContainer" containerID="46084ff836c7e5f257703b6a90ec08b858d505366f7ec5e5314875baa7df20e7" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.783687 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vxdd2"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.787692 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p7m24"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.797601 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p7m24"] Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.799200 4943 scope.go:117] "RemoveContainer" containerID="f8b1e8d1e96fef28ce3c2dbfcf781cbde0570947ccc2e3de004e1deab855b82e" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.813499 4943 scope.go:117] "RemoveContainer" containerID="e3413492f790a89481d36af5e84f44edbf6c556b0ffa9576b5452eaa4d803b78" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.828904 4943 scope.go:117] "RemoveContainer" containerID="051624975b58b473f64ada6ab33192bb4aabaadb34e364f5d979362e71f5190b" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.847233 4943 scope.go:117] "RemoveContainer" containerID="ae067174ae5e14c47e9ac7e65cca9a705d6ee8e5c76b57132e1a28c0da1915b0" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.864676 4943 scope.go:117] "RemoveContainer" containerID="67d9576d7a4c842f1ad09562f347bb36423d2b2086884a6fc58d34f54212b1fa" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.882408 4943 scope.go:117] "RemoveContainer" containerID="f1e37a834ee922656d1544911480f7e9903a9c667520f5eca1abe6d550e7ae24" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.897377 4943 scope.go:117] "RemoveContainer" 
containerID="596851d12eac218e8fb111cab4c4ba32b77f71bc838b77b2f6237852fdb87ce8" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.911845 4943 scope.go:117] "RemoveContainer" containerID="22b93e6df2027c1ca1d288f4c3b47a243535802087c772c99659980302887b88" Nov 29 06:40:15 crc kubenswrapper[4943]: I1129 06:40:15.927692 4943 scope.go:117] "RemoveContainer" containerID="6c00f555cf3311aaaccff80347322bf1abd8e9658989cab55d5828c7dbeea95c" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607021 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7s6bv"] Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607599 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607614 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607624 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerName="extract-utilities" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607633 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerName="extract-utilities" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607645 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607654 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607663 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b143e5c3-54ec-40d9-9c11-690cf321df9f" containerName="marketplace-operator" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607670 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b143e5c3-54ec-40d9-9c11-690cf321df9f" containerName="marketplace-operator" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607681 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerName="extract-content" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607688 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerName="extract-content" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607697 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerName="extract-utilities" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607705 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerName="extract-utilities" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607714 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607721 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607731 4943 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="extract-utilities" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607739 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="extract-utilities" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607747 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerName="extract-content" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607755 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerName="extract-content" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607766 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="extract-utilities" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607773 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="extract-utilities" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607782 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="extract-content" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607789 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="extract-content" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607799 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="extract-content" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607806 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="extract-content" Nov 29 06:40:16 crc kubenswrapper[4943]: E1129 06:40:16.607819 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607826 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607930 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607945 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b143e5c3-54ec-40d9-9c11-690cf321df9f" containerName="marketplace-operator" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607957 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607965 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.607974 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" containerName="registry-server" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.608793 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.610469 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.617363 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7s6bv"] Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.671115 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad10da80-663e-4d61-b15c-76e7b3005c5a-utilities\") pod \"certified-operators-7s6bv\" (UID: \"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.671156 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad10da80-663e-4d61-b15c-76e7b3005c5a-catalog-content\") pod \"certified-operators-7s6bv\" (UID: \"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.671268 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49bxv\" (UniqueName: \"kubernetes.io/projected/ad10da80-663e-4d61-b15c-76e7b3005c5a-kube-api-access-49bxv\") pod \"certified-operators-7s6bv\" (UID: \"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.772603 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad10da80-663e-4d61-b15c-76e7b3005c5a-utilities\") pod \"certified-operators-7s6bv\" (UID: \"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.772654 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad10da80-663e-4d61-b15c-76e7b3005c5a-catalog-content\") pod \"certified-operators-7s6bv\" (UID: \"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.772732 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49bxv\" (UniqueName: \"kubernetes.io/projected/ad10da80-663e-4d61-b15c-76e7b3005c5a-kube-api-access-49bxv\") pod \"certified-operators-7s6bv\" (UID: \"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.773132 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad10da80-663e-4d61-b15c-76e7b3005c5a-utilities\") pod \"certified-operators-7s6bv\" (UID: \"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.773135 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad10da80-663e-4d61-b15c-76e7b3005c5a-catalog-content\") pod \"certified-operators-7s6bv\" (UID: 
\"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.790653 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49bxv\" (UniqueName: \"kubernetes.io/projected/ad10da80-663e-4d61-b15c-76e7b3005c5a-kube-api-access-49bxv\") pod \"certified-operators-7s6bv\" (UID: \"ad10da80-663e-4d61-b15c-76e7b3005c5a\") " pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:16 crc kubenswrapper[4943]: I1129 06:40:16.930853 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:17 crc kubenswrapper[4943]: I1129 06:40:17.334763 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04357fd3-79d7-4a5c-b4ba-01e1ff2face4" path="/var/lib/kubelet/pods/04357fd3-79d7-4a5c-b4ba-01e1ff2face4/volumes" Nov 29 06:40:17 crc kubenswrapper[4943]: I1129 06:40:17.336099 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b60b6a4-d67c-4450-b5f3-58d3124be789" path="/var/lib/kubelet/pods/3b60b6a4-d67c-4450-b5f3-58d3124be789/volumes" Nov 29 06:40:17 crc kubenswrapper[4943]: I1129 06:40:17.337358 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41b233a1-b997-493b-a71c-e4b9bd816479" path="/var/lib/kubelet/pods/41b233a1-b997-493b-a71c-e4b9bd816479/volumes" Nov 29 06:40:17 crc kubenswrapper[4943]: I1129 06:40:17.343036 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8911d577-ec61-4e6d-96e1-c51ee6b5477e" path="/var/lib/kubelet/pods/8911d577-ec61-4e6d-96e1-c51ee6b5477e/volumes" Nov 29 06:40:17 crc kubenswrapper[4943]: I1129 06:40:17.343836 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b143e5c3-54ec-40d9-9c11-690cf321df9f" path="/var/lib/kubelet/pods/b143e5c3-54ec-40d9-9c11-690cf321df9f/volumes" Nov 29 06:40:17 crc kubenswrapper[4943]: I1129 06:40:17.344331 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7s6bv"] Nov 29 06:40:17 crc kubenswrapper[4943]: I1129 06:40:17.738706 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7s6bv" event={"ID":"ad10da80-663e-4d61-b15c-76e7b3005c5a","Type":"ContainerStarted","Data":"21e60f37b0c64395e9bae95dbe6f4cb0c1e6e7595bcf2cb04313dd336b25cc9b"} Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.007033 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mhsb2"] Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.007965 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.009778 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.017603 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mhsb2"] Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.087798 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-catalog-content\") pod \"community-operators-mhsb2\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.087910 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngbs6\" (UniqueName: \"kubernetes.io/projected/6721f85f-7d2a-4877-ae05-b2bc850bd10e-kube-api-access-ngbs6\") pod \"community-operators-mhsb2\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.087966 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-utilities\") pod \"community-operators-mhsb2\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.189235 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-catalog-content\") pod \"community-operators-mhsb2\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.189536 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngbs6\" (UniqueName: \"kubernetes.io/projected/6721f85f-7d2a-4877-ae05-b2bc850bd10e-kube-api-access-ngbs6\") pod \"community-operators-mhsb2\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.189560 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-utilities\") pod \"community-operators-mhsb2\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.189874 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-catalog-content\") pod \"community-operators-mhsb2\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.189950 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-utilities\") pod \"community-operators-mhsb2\" (UID: 
\"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.206593 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngbs6\" (UniqueName: \"kubernetes.io/projected/6721f85f-7d2a-4877-ae05-b2bc850bd10e-kube-api-access-ngbs6\") pod \"community-operators-mhsb2\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.330726 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.737824 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mhsb2"] Nov 29 06:40:18 crc kubenswrapper[4943]: W1129 06:40:18.754138 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6721f85f_7d2a_4877_ae05_b2bc850bd10e.slice/crio-ecc22e9e4c02bdb664c3d82f0cf78c08e13fbdb8d943534247a9727f49c7c95a WatchSource:0}: Error finding container ecc22e9e4c02bdb664c3d82f0cf78c08e13fbdb8d943534247a9727f49c7c95a: Status 404 returned error can't find the container with id ecc22e9e4c02bdb664c3d82f0cf78c08e13fbdb8d943534247a9727f49c7c95a Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.761972 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" event={"ID":"688a93bc-3061-40cb-a122-02b679922465","Type":"ContainerStarted","Data":"337b5369f351b82f3d407246b590aac6165e63fbbe4f913d4952fe25f3235068"} Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.762468 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.765610 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.766041 4943 generic.go:334] "Generic (PLEG): container finished" podID="ad10da80-663e-4d61-b15c-76e7b3005c5a" containerID="097425d2ec707ecd3acdff59d6b7d46ceb801d1bfd34ddaea2b32bf9ef8ac42b" exitCode=0 Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.766076 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7s6bv" event={"ID":"ad10da80-663e-4d61-b15c-76e7b3005c5a","Type":"ContainerDied","Data":"097425d2ec707ecd3acdff59d6b7d46ceb801d1bfd34ddaea2b32bf9ef8ac42b"} Nov 29 06:40:18 crc kubenswrapper[4943]: I1129 06:40:18.784550 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-mzndv" podStartSLOduration=4.784524998 podStartE2EDuration="4.784524998s" podCreationTimestamp="2025-11-29 06:40:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:40:18.779203812 +0000 UTC m=+393.709292585" watchObservedRunningTime="2025-11-29 06:40:18.784524998 +0000 UTC m=+393.714613741" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.020523 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s79pz"] Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.022319 4943 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.024144 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.033938 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s79pz"] Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.102596 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsn4v\" (UniqueName: \"kubernetes.io/projected/48de0189-2ca0-4efc-abfc-50d22fb3abe6-kube-api-access-qsn4v\") pod \"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.102664 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48de0189-2ca0-4efc-abfc-50d22fb3abe6-utilities\") pod \"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.102696 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48de0189-2ca0-4efc-abfc-50d22fb3abe6-catalog-content\") pod \"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.203977 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsn4v\" (UniqueName: \"kubernetes.io/projected/48de0189-2ca0-4efc-abfc-50d22fb3abe6-kube-api-access-qsn4v\") pod \"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.204030 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48de0189-2ca0-4efc-abfc-50d22fb3abe6-utilities\") pod \"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.204062 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48de0189-2ca0-4efc-abfc-50d22fb3abe6-catalog-content\") pod \"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.204592 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48de0189-2ca0-4efc-abfc-50d22fb3abe6-catalog-content\") pod \"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.204594 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48de0189-2ca0-4efc-abfc-50d22fb3abe6-utilities\") pod 
\"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.226805 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsn4v\" (UniqueName: \"kubernetes.io/projected/48de0189-2ca0-4efc-abfc-50d22fb3abe6-kube-api-access-qsn4v\") pod \"redhat-marketplace-s79pz\" (UID: \"48de0189-2ca0-4efc-abfc-50d22fb3abe6\") " pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.390262 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s79pz" Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.780327 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s79pz"] Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.784075 4943 generic.go:334] "Generic (PLEG): container finished" podID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerID="069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736" exitCode=0 Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.785653 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhsb2" event={"ID":"6721f85f-7d2a-4877-ae05-b2bc850bd10e","Type":"ContainerDied","Data":"069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736"} Nov 29 06:40:19 crc kubenswrapper[4943]: I1129 06:40:19.785680 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhsb2" event={"ID":"6721f85f-7d2a-4877-ae05-b2bc850bd10e","Type":"ContainerStarted","Data":"ecc22e9e4c02bdb664c3d82f0cf78c08e13fbdb8d943534247a9727f49c7c95a"} Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.420982 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q8nkf"] Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.422512 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.424536 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.425068 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q8nkf"] Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.538712 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-utilities\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.539160 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-catalog-content\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.539212 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6s6h\" (UniqueName: \"kubernetes.io/projected/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-kube-api-access-w6s6h\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.640657 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-catalog-content\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.640726 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6s6h\" (UniqueName: \"kubernetes.io/projected/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-kube-api-access-w6s6h\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.640764 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-utilities\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.641124 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-catalog-content\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.641212 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-utilities\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " 
pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.660306 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6s6h\" (UniqueName: \"kubernetes.io/projected/e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f-kube-api-access-w6s6h\") pod \"redhat-operators-q8nkf\" (UID: \"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f\") " pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.745396 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q8nkf" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.752997 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-qjtkl" Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.801343 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vm9wf"] Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.823934 4943 generic.go:334] "Generic (PLEG): container finished" podID="ad10da80-663e-4d61-b15c-76e7b3005c5a" containerID="cfdf26cdf951e43269127cffd22586527b5e4bb329d96d445406eb444cf91247" exitCode=0 Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.824439 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7s6bv" event={"ID":"ad10da80-663e-4d61-b15c-76e7b3005c5a","Type":"ContainerDied","Data":"cfdf26cdf951e43269127cffd22586527b5e4bb329d96d445406eb444cf91247"} Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.855052 4943 generic.go:334] "Generic (PLEG): container finished" podID="48de0189-2ca0-4efc-abfc-50d22fb3abe6" containerID="a681484b6348b5e0ec55847e11343a347e91ac924362470e0eea1f4bfe61cdaf" exitCode=0 Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.856280 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s79pz" event={"ID":"48de0189-2ca0-4efc-abfc-50d22fb3abe6","Type":"ContainerDied","Data":"a681484b6348b5e0ec55847e11343a347e91ac924362470e0eea1f4bfe61cdaf"} Nov 29 06:40:20 crc kubenswrapper[4943]: I1129 06:40:20.856315 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s79pz" event={"ID":"48de0189-2ca0-4efc-abfc-50d22fb3abe6","Type":"ContainerStarted","Data":"54b192c263ec5f7b13c7f30cbd04b20891b955af1719961de1782766485e173d"} Nov 29 06:40:21 crc kubenswrapper[4943]: I1129 06:40:21.180323 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q8nkf"] Nov 29 06:40:21 crc kubenswrapper[4943]: I1129 06:40:21.861601 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q8nkf" event={"ID":"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f","Type":"ContainerStarted","Data":"f99273f4f243a6abc90f11f181d8aa355183ef25cde30e737119f6ee5c721daa"} Nov 29 06:40:22 crc kubenswrapper[4943]: I1129 06:40:22.872197 4943 generic.go:334] "Generic (PLEG): container finished" podID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerID="38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec" exitCode=0 Nov 29 06:40:22 crc kubenswrapper[4943]: I1129 06:40:22.872593 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhsb2" 
event={"ID":"6721f85f-7d2a-4877-ae05-b2bc850bd10e","Type":"ContainerDied","Data":"38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec"} Nov 29 06:40:22 crc kubenswrapper[4943]: I1129 06:40:22.876674 4943 generic.go:334] "Generic (PLEG): container finished" podID="48de0189-2ca0-4efc-abfc-50d22fb3abe6" containerID="6689394480eb4188f79070afbb669081d24e528ea4dd586ff22a97aee3aef427" exitCode=0 Nov 29 06:40:22 crc kubenswrapper[4943]: I1129 06:40:22.876994 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s79pz" event={"ID":"48de0189-2ca0-4efc-abfc-50d22fb3abe6","Type":"ContainerDied","Data":"6689394480eb4188f79070afbb669081d24e528ea4dd586ff22a97aee3aef427"} Nov 29 06:40:22 crc kubenswrapper[4943]: I1129 06:40:22.881912 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7s6bv" event={"ID":"ad10da80-663e-4d61-b15c-76e7b3005c5a","Type":"ContainerStarted","Data":"78e3425080962fa92b2d4d589042f5c4a995372d1245d579d3ad137662451324"} Nov 29 06:40:22 crc kubenswrapper[4943]: I1129 06:40:22.883297 4943 generic.go:334] "Generic (PLEG): container finished" podID="e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f" containerID="5fd3bc3bdc15bb999a1921ce879f091f76371af2aa98776503ee0edf9641ccd5" exitCode=0 Nov 29 06:40:22 crc kubenswrapper[4943]: I1129 06:40:22.883320 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q8nkf" event={"ID":"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f","Type":"ContainerDied","Data":"5fd3bc3bdc15bb999a1921ce879f091f76371af2aa98776503ee0edf9641ccd5"} Nov 29 06:40:22 crc kubenswrapper[4943]: I1129 06:40:22.921695 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7s6bv" podStartSLOduration=3.312387774 podStartE2EDuration="6.921677336s" podCreationTimestamp="2025-11-29 06:40:16 +0000 UTC" firstStartedPulling="2025-11-29 06:40:18.774769318 +0000 UTC m=+393.704858071" lastFinishedPulling="2025-11-29 06:40:22.38405888 +0000 UTC m=+397.314147633" observedRunningTime="2025-11-29 06:40:22.915462587 +0000 UTC m=+397.845551360" watchObservedRunningTime="2025-11-29 06:40:22.921677336 +0000 UTC m=+397.851766079" Nov 29 06:40:25 crc kubenswrapper[4943]: I1129 06:40:25.161128 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-597c79bdbb-fzjg4"] Nov 29 06:40:25 crc kubenswrapper[4943]: I1129 06:40:25.162721 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" podUID="4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" containerName="controller-manager" containerID="cri-o://8c81bbb1f60452f6f8ccbaeded4f7dc91a910b9b0c83c3d02dd90f4cbfd70f14" gracePeriod=30 Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.906123 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhsb2" event={"ID":"6721f85f-7d2a-4877-ae05-b2bc850bd10e","Type":"ContainerStarted","Data":"1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3"} Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.909187 4943 generic.go:334] "Generic (PLEG): container finished" podID="4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" containerID="8c81bbb1f60452f6f8ccbaeded4f7dc91a910b9b0c83c3d02dd90f4cbfd70f14" exitCode=0 Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.909288 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" event={"ID":"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f","Type":"ContainerDied","Data":"8c81bbb1f60452f6f8ccbaeded4f7dc91a910b9b0c83c3d02dd90f4cbfd70f14"} Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.915307 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s79pz" event={"ID":"48de0189-2ca0-4efc-abfc-50d22fb3abe6","Type":"ContainerStarted","Data":"6662a4d7263e43a273841baca493bd539b995ac8b6f7b4bcef429283a27d04b8"} Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.922968 4943 generic.go:334] "Generic (PLEG): container finished" podID="e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f" containerID="15f6afe9a77224ff3d7fa8ff34d24edb8d4981fe3864d05d9895dce3fa6daa61" exitCode=0 Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.923036 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q8nkf" event={"ID":"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f","Type":"ContainerDied","Data":"15f6afe9a77224ff3d7fa8ff34d24edb8d4981fe3864d05d9895dce3fa6daa61"} Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.928669 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mhsb2" podStartSLOduration=6.23968023 podStartE2EDuration="9.928651025s" podCreationTimestamp="2025-11-29 06:40:17 +0000 UTC" firstStartedPulling="2025-11-29 06:40:19.786689196 +0000 UTC m=+394.716777949" lastFinishedPulling="2025-11-29 06:40:23.475659981 +0000 UTC m=+398.405748744" observedRunningTime="2025-11-29 06:40:26.925914535 +0000 UTC m=+401.856003308" watchObservedRunningTime="2025-11-29 06:40:26.928651025 +0000 UTC m=+401.858739778" Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.931426 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.931484 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.981918 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s79pz" podStartSLOduration=6.186465129 podStartE2EDuration="8.981899651s" podCreationTimestamp="2025-11-29 06:40:18 +0000 UTC" firstStartedPulling="2025-11-29 06:40:20.857041853 +0000 UTC m=+395.787130606" lastFinishedPulling="2025-11-29 06:40:23.652476375 +0000 UTC m=+398.582565128" observedRunningTime="2025-11-29 06:40:26.974955243 +0000 UTC m=+401.905044016" watchObservedRunningTime="2025-11-29 06:40:26.981899651 +0000 UTC m=+401.911988404" Nov 29 06:40:26 crc kubenswrapper[4943]: I1129 06:40:26.985119 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.037653 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.068531 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5d9d469f56-tmk7s"] Nov 29 06:40:27 crc kubenswrapper[4943]: E1129 06:40:27.069325 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" containerName="controller-manager" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.069422 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" containerName="controller-manager" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.069621 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" containerName="controller-manager" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.070272 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.077809 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d9d469f56-tmk7s"] Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.181524 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htqs6\" (UniqueName: \"kubernetes.io/projected/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-kube-api-access-htqs6\") pod \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.181631 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-client-ca\") pod \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.181704 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-config\") pod \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.181731 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-serving-cert\") pod \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.181769 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-proxy-ca-bundles\") pod \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\" (UID: \"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f\") " Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.181979 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-config\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.182007 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw49z\" (UniqueName: \"kubernetes.io/projected/41c6bb21-5bdf-4a39-a857-52827ebf712c-kube-api-access-hw49z\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.182027 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-proxy-ca-bundles\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.182051 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-client-ca\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.182090 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41c6bb21-5bdf-4a39-a857-52827ebf712c-serving-cert\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.182478 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-client-ca" (OuterVolumeSpecName: "client-ca") pod "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" (UID: "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.182719 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-config" (OuterVolumeSpecName: "config") pod "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" (UID: "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.183083 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" (UID: "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.189765 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" (UID: "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.191371 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-kube-api-access-htqs6" (OuterVolumeSpecName: "kube-api-access-htqs6") pod "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" (UID: "4c84a02e-8f2c-4e63-9b5b-4fd2789f234f"). InnerVolumeSpecName "kube-api-access-htqs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.282912 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-config\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.282973 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw49z\" (UniqueName: \"kubernetes.io/projected/41c6bb21-5bdf-4a39-a857-52827ebf712c-kube-api-access-hw49z\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.282996 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-proxy-ca-bundles\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.283018 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-client-ca\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.283058 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41c6bb21-5bdf-4a39-a857-52827ebf712c-serving-cert\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.283112 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htqs6\" (UniqueName: \"kubernetes.io/projected/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-kube-api-access-htqs6\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.283123 4943 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.283133 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.283142 4943 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.283149 4943 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.284342 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-client-ca\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.284682 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-proxy-ca-bundles\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.284830 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41c6bb21-5bdf-4a39-a857-52827ebf712c-config\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.287863 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41c6bb21-5bdf-4a39-a857-52827ebf712c-serving-cert\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.305413 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw49z\" (UniqueName: \"kubernetes.io/projected/41c6bb21-5bdf-4a39-a857-52827ebf712c-kube-api-access-hw49z\") pod \"controller-manager-5d9d469f56-tmk7s\" (UID: \"41c6bb21-5bdf-4a39-a857-52827ebf712c\") " pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.388777 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.800581 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d9d469f56-tmk7s"] Nov 29 06:40:27 crc kubenswrapper[4943]: W1129 06:40:27.806430 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41c6bb21_5bdf_4a39_a857_52827ebf712c.slice/crio-de7d4a5ecc4791a67855a47642bc12a0d809b2d0745bbfccd829af71d30a66a2 WatchSource:0}: Error finding container de7d4a5ecc4791a67855a47642bc12a0d809b2d0745bbfccd829af71d30a66a2: Status 404 returned error can't find the container with id de7d4a5ecc4791a67855a47642bc12a0d809b2d0745bbfccd829af71d30a66a2 Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.932751 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" event={"ID":"41c6bb21-5bdf-4a39-a857-52827ebf712c","Type":"ContainerStarted","Data":"de7d4a5ecc4791a67855a47642bc12a0d809b2d0745bbfccd829af71d30a66a2"} Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.936139 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" event={"ID":"4c84a02e-8f2c-4e63-9b5b-4fd2789f234f","Type":"ContainerDied","Data":"8122d1e3532b59640a9b39abf8470cb56a296dd223bb0f9fbf9022691c8f36e7"} Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.936221 4943 scope.go:117] "RemoveContainer" containerID="8c81bbb1f60452f6f8ccbaeded4f7dc91a910b9b0c83c3d02dd90f4cbfd70f14" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.936359 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-597c79bdbb-fzjg4" Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.960496 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-597c79bdbb-fzjg4"] Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.961705 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-597c79bdbb-fzjg4"] Nov 29 06:40:27 crc kubenswrapper[4943]: I1129 06:40:27.983354 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7s6bv" Nov 29 06:40:28 crc kubenswrapper[4943]: I1129 06:40:28.332553 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:28 crc kubenswrapper[4943]: I1129 06:40:28.332637 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mhsb2" Nov 29 06:40:28 crc kubenswrapper[4943]: I1129 06:40:28.946841 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q8nkf" event={"ID":"e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f","Type":"ContainerStarted","Data":"157b453341136095e6be20a39341efb1fb04c0183ac715b4911fa33f5c74dbe3"} Nov 29 06:40:28 crc kubenswrapper[4943]: I1129 06:40:28.950940 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" event={"ID":"41c6bb21-5bdf-4a39-a857-52827ebf712c","Type":"ContainerStarted","Data":"ef08bfb7c76d8a1cbf42c514e63007d1f6c2d5ea1306e60a4d23ece61653314a"} Nov 29 06:40:28 crc kubenswrapper[4943]: I1129 06:40:28.984463 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q8nkf" podStartSLOduration=3.452863877 podStartE2EDuration="8.984435142s" podCreationTimestamp="2025-11-29 06:40:20 +0000 UTC" firstStartedPulling="2025-11-29 06:40:22.88440493 +0000 UTC m=+397.814493683" lastFinishedPulling="2025-11-29 06:40:28.415976185 +0000 UTC m=+403.346064948" observedRunningTime="2025-11-29 06:40:28.979145306 +0000 UTC m=+403.909234079" watchObservedRunningTime="2025-11-29 06:40:28.984435142 +0000 UTC m=+403.914523905" Nov 29 06:40:29 crc kubenswrapper[4943]: I1129 06:40:29.002913 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s" podStartSLOduration=4.002883575 podStartE2EDuration="4.002883575s" podCreationTimestamp="2025-11-29 06:40:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:40:29.00077492 +0000 UTC m=+403.930863683" watchObservedRunningTime="2025-11-29 06:40:29.002883575 +0000 UTC m=+403.932972328" Nov 29 06:40:29 crc kubenswrapper[4943]: I1129 06:40:29.334553 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c84a02e-8f2c-4e63-9b5b-4fd2789f234f" path="/var/lib/kubelet/pods/4c84a02e-8f2c-4e63-9b5b-4fd2789f234f/volumes" Nov 29 06:40:29 crc kubenswrapper[4943]: I1129 06:40:29.370966 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-mhsb2" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="registry-server" probeResult="failure" output=< Nov 29 06:40:29 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s 
Nov 29 06:40:29 crc kubenswrapper[4943]: >
Nov 29 06:40:29 crc kubenswrapper[4943]: I1129 06:40:29.390791 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s79pz"
Nov 29 06:40:29 crc kubenswrapper[4943]: I1129 06:40:29.391655 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s79pz"
Nov 29 06:40:29 crc kubenswrapper[4943]: I1129 06:40:29.438695 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s79pz"
Nov 29 06:40:29 crc kubenswrapper[4943]: I1129 06:40:29.955828 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s"
Nov 29 06:40:29 crc kubenswrapper[4943]: I1129 06:40:29.961170 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5d9d469f56-tmk7s"
Nov 29 06:40:30 crc kubenswrapper[4943]: I1129 06:40:30.746024 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q8nkf"
Nov 29 06:40:30 crc kubenswrapper[4943]: I1129 06:40:30.746328 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q8nkf"
Nov 29 06:40:31 crc kubenswrapper[4943]: I1129 06:40:31.778982 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q8nkf" podUID="e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f" containerName="registry-server" probeResult="failure" output=<
Nov 29 06:40:31 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s
Nov 29 06:40:31 crc kubenswrapper[4943]: >
Nov 29 06:40:32 crc kubenswrapper[4943]: I1129 06:40:32.613742 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:40:32 crc kubenswrapper[4943]: I1129 06:40:32.613804 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:40:38 crc kubenswrapper[4943]: I1129 06:40:38.388740 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mhsb2"
Nov 29 06:40:38 crc kubenswrapper[4943]: I1129 06:40:38.426689 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mhsb2"
Nov 29 06:40:39 crc kubenswrapper[4943]: I1129 06:40:39.426109 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s79pz"
Nov 29 06:40:40 crc kubenswrapper[4943]: I1129 06:40:40.781834 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q8nkf"
Nov 29 06:40:40 crc kubenswrapper[4943]: I1129 06:40:40.820485 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q8nkf"
Nov 29 06:40:45 crc kubenswrapper[4943]: I1129 06:40:45.663919 4943 scope.go:117] "RemoveContainer" containerID="86ee199d90bf798f6840b5bb8cafa22616a349773a696f96577a07a3fbbdec68"
Nov 29 06:40:45 crc kubenswrapper[4943]: I1129 06:40:45.692930 4943 scope.go:117] "RemoveContainer" containerID="6e0a52bb0673dd247d0d9adf4c5f7cfaee6fec590ef9d242d6e9ec4a9628a6a1"
Nov 29 06:40:45 crc kubenswrapper[4943]: I1129 06:40:45.716974 4943 scope.go:117] "RemoveContainer" containerID="a27de4e4b0a4ed552a2ad3a286e4f2b9d3f88b1043fd2e96bd74a00d00850efb"
Nov 29 06:40:45 crc kubenswrapper[4943]: I1129 06:40:45.733117 4943 scope.go:117] "RemoveContainer" containerID="ee37a3f7359b29ba989e2a0c5ac7226837535ee04e08e0d0cea485eaec0fb1a6"
Nov 29 06:40:45 crc kubenswrapper[4943]: I1129 06:40:45.747670 4943 scope.go:117] "RemoveContainer" containerID="c2812a77f81136907f3c41aef2f6889f87af9e6d58dc6bae74d3356c09899f8d"
Nov 29 06:40:45 crc kubenswrapper[4943]: I1129 06:40:45.758155 4943 scope.go:117] "RemoveContainer" containerID="c70a6052b5f99e4478bcf2bf51ef248df11c0d9e5dc548fedd31eb315450ec8f"
Nov 29 06:40:45 crc kubenswrapper[4943]: I1129 06:40:45.848210 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" podUID="3049b3ed-f405-4ecc-ade1-ad9753e53c1d" containerName="registry" containerID="cri-o://67ff8095ff1209486ab413dd7a6f7e39b292e752542190eed375a2de4032a3a0" gracePeriod=30
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.052456 4943 generic.go:334] "Generic (PLEG): container finished" podID="3049b3ed-f405-4ecc-ade1-ad9753e53c1d" containerID="67ff8095ff1209486ab413dd7a6f7e39b292e752542190eed375a2de4032a3a0" exitCode=0
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.052575 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" event={"ID":"3049b3ed-f405-4ecc-ade1-ad9753e53c1d","Type":"ContainerDied","Data":"67ff8095ff1209486ab413dd7a6f7e39b292e752542190eed375a2de4032a3a0"}
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.163783 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.173803 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8kng\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-kube-api-access-w8kng\") pod \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") "
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.173872 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-ca-trust-extracted\") pod \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") "
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.173894 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-bound-sa-token\") pod \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") "
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.173916 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-installation-pull-secrets\") pod \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") "
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.173936 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-certificates\") pod \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") "
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.173955 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-tls\") pod \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") "
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.173972 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca\") pod \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") "
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.174197 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\" (UID: \"3049b3ed-f405-4ecc-ade1-ad9753e53c1d\") "
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.175652 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "3049b3ed-f405-4ecc-ade1-ad9753e53c1d" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.176068 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "3049b3ed-f405-4ecc-ade1-ad9753e53c1d" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.180223 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "3049b3ed-f405-4ecc-ade1-ad9753e53c1d" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.181771 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "3049b3ed-f405-4ecc-ade1-ad9753e53c1d" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.188096 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "3049b3ed-f405-4ecc-ade1-ad9753e53c1d" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.190440 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-kube-api-access-w8kng" (OuterVolumeSpecName: "kube-api-access-w8kng") pod "3049b3ed-f405-4ecc-ade1-ad9753e53c1d" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d"). InnerVolumeSpecName "kube-api-access-w8kng". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.193384 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "3049b3ed-f405-4ecc-ade1-ad9753e53c1d" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.195244 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "3049b3ed-f405-4ecc-ade1-ad9753e53c1d" (UID: "3049b3ed-f405-4ecc-ade1-ad9753e53c1d"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.275077 4943 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.275129 4943 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.275141 4943 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.275155 4943 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.275168 4943 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.275179 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 29 06:40:49 crc kubenswrapper[4943]: I1129 06:40:49.275190 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8kng\" (UniqueName: \"kubernetes.io/projected/3049b3ed-f405-4ecc-ade1-ad9753e53c1d-kube-api-access-w8kng\") on node \"crc\" DevicePath \"\""
Nov 29 06:40:50 crc kubenswrapper[4943]: I1129 06:40:50.058685 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf" event={"ID":"3049b3ed-f405-4ecc-ade1-ad9753e53c1d","Type":"ContainerDied","Data":"63c0a4936cad362f4d9a73d6c824c5a930b46bbb3ab9c27afd75c3716ece2388"}
Nov 29 06:40:50 crc kubenswrapper[4943]: I1129 06:40:50.058736 4943 scope.go:117] "RemoveContainer" containerID="67ff8095ff1209486ab413dd7a6f7e39b292e752542190eed375a2de4032a3a0"
Nov 29 06:40:50 crc kubenswrapper[4943]: I1129 06:40:50.059747 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-vm9wf"
Nov 29 06:40:50 crc kubenswrapper[4943]: I1129 06:40:50.082747 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vm9wf"]
Nov 29 06:40:50 crc kubenswrapper[4943]: I1129 06:40:50.084816 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vm9wf"]
Nov 29 06:40:51 crc kubenswrapper[4943]: I1129 06:40:51.338956 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3049b3ed-f405-4ecc-ade1-ad9753e53c1d" path="/var/lib/kubelet/pods/3049b3ed-f405-4ecc-ade1-ad9753e53c1d/volumes"
Nov 29 06:41:02 crc kubenswrapper[4943]: I1129 06:41:02.613577 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:41:02 crc kubenswrapper[4943]: I1129 06:41:02.614194 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:41:02 crc kubenswrapper[4943]: I1129 06:41:02.614243 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7"
Nov 29 06:41:02 crc kubenswrapper[4943]: I1129 06:41:02.614849 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6f215c40b459e9f96a43f81f6fb5469259bcf0fe4718e287a6701e572ffdeda7"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 06:41:02 crc kubenswrapper[4943]: I1129 06:41:02.614902 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://6f215c40b459e9f96a43f81f6fb5469259bcf0fe4718e287a6701e572ffdeda7" gracePeriod=600
Nov 29 06:41:03 crc kubenswrapper[4943]: I1129 06:41:03.133813 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="6f215c40b459e9f96a43f81f6fb5469259bcf0fe4718e287a6701e572ffdeda7" exitCode=0
Nov 29 06:41:03 crc kubenswrapper[4943]: I1129 06:41:03.133908 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"6f215c40b459e9f96a43f81f6fb5469259bcf0fe4718e287a6701e572ffdeda7"}
Nov 29 06:41:03 crc kubenswrapper[4943]: I1129 06:41:03.134514 4943 scope.go:117] "RemoveContainer" containerID="9603db2881bfac82d2d6bbd10d1b224afb4caf9c7a643e8e6f467eafc9eef81f"
Nov 29 06:41:04 crc kubenswrapper[4943]: I1129 06:41:04.143463 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"4b6d56c48a91c01fc97cfd0d2fb91b2725d9c5c63de568919fc058a8e02a4a2d"}
Nov 29 06:43:32 crc kubenswrapper[4943]: I1129 06:43:32.613059 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:43:32 crc kubenswrapper[4943]: I1129 06:43:32.614778 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:44:02 crc kubenswrapper[4943]: I1129 06:44:02.613682 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:44:02 crc kubenswrapper[4943]: I1129 06:44:02.614230 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:44:32 crc kubenswrapper[4943]: I1129 06:44:32.613961 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:44:32 crc kubenswrapper[4943]: I1129 06:44:32.614707 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:44:32 crc kubenswrapper[4943]: I1129 06:44:32.614778 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7"
Nov 29 06:44:32 crc kubenswrapper[4943]: I1129 06:44:32.615685 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4b6d56c48a91c01fc97cfd0d2fb91b2725d9c5c63de568919fc058a8e02a4a2d"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 06:44:32 crc kubenswrapper[4943]: I1129 06:44:32.615788 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://4b6d56c48a91c01fc97cfd0d2fb91b2725d9c5c63de568919fc058a8e02a4a2d" gracePeriod=600
Nov 29 06:44:34 crc kubenswrapper[4943]: I1129 06:44:34.924702 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="4b6d56c48a91c01fc97cfd0d2fb91b2725d9c5c63de568919fc058a8e02a4a2d" exitCode=0
Nov 29 06:44:34 crc kubenswrapper[4943]: I1129 06:44:34.924790 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"4b6d56c48a91c01fc97cfd0d2fb91b2725d9c5c63de568919fc058a8e02a4a2d"}
Nov 29 06:44:34 crc kubenswrapper[4943]: I1129 06:44:34.925381 4943 scope.go:117] "RemoveContainer" containerID="6f215c40b459e9f96a43f81f6fb5469259bcf0fe4718e287a6701e572ffdeda7"
Nov 29 06:44:35 crc kubenswrapper[4943]: I1129 06:44:35.934345 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"fa03227f4d437f0a53532154ae212e7074c3ffe3db244477095def4be235847f"}
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.154340 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"]
Nov 29 06:45:00 crc kubenswrapper[4943]: E1129 06:45:00.155083 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3049b3ed-f405-4ecc-ade1-ad9753e53c1d" containerName="registry"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.155095 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3049b3ed-f405-4ecc-ade1-ad9753e53c1d" containerName="registry"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.155185 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3049b3ed-f405-4ecc-ade1-ad9753e53c1d" containerName="registry"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.155533 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.157294 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.160143 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.164322 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"]
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.273626 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g44vt\" (UniqueName: \"kubernetes.io/projected/bddbe380-25b7-4d94-a35e-63630fa940f3-kube-api-access-g44vt\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.273685 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bddbe380-25b7-4d94-a35e-63630fa940f3-config-volume\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.273707 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bddbe380-25b7-4d94-a35e-63630fa940f3-secret-volume\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.375202 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g44vt\" (UniqueName: \"kubernetes.io/projected/bddbe380-25b7-4d94-a35e-63630fa940f3-kube-api-access-g44vt\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.375370 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bddbe380-25b7-4d94-a35e-63630fa940f3-config-volume\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.375441 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bddbe380-25b7-4d94-a35e-63630fa940f3-secret-volume\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.377039 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bddbe380-25b7-4d94-a35e-63630fa940f3-config-volume\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.383468 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bddbe380-25b7-4d94-a35e-63630fa940f3-secret-volume\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.394452 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g44vt\" (UniqueName: \"kubernetes.io/projected/bddbe380-25b7-4d94-a35e-63630fa940f3-kube-api-access-g44vt\") pod \"collect-profiles-29406645-9bvng\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.474649 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"
Nov 29 06:45:00 crc kubenswrapper[4943]: I1129 06:45:00.689415 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"]
Nov 29 06:45:01 crc kubenswrapper[4943]: I1129 06:45:01.068261 4943 generic.go:334] "Generic (PLEG): container finished" podID="bddbe380-25b7-4d94-a35e-63630fa940f3" containerID="4cfaa36f3c04bafcac0d5869505e8063f6f243917a5ea3cca4218d4672b45a06" exitCode=0
Nov 29 06:45:01 crc kubenswrapper[4943]: I1129 06:45:01.068365 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng" event={"ID":"bddbe380-25b7-4d94-a35e-63630fa940f3","Type":"ContainerDied","Data":"4cfaa36f3c04bafcac0d5869505e8063f6f243917a5ea3cca4218d4672b45a06"}
Nov 29 06:45:01 crc kubenswrapper[4943]: I1129 06:45:01.068587 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng" event={"ID":"bddbe380-25b7-4d94-a35e-63630fa940f3","Type":"ContainerStarted","Data":"0f9859d4d06be67f0d91fa20ef2f7a57e07e296ae429893d47616b00b969ca3d"}
Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.264487 4943 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng" Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.402287 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bddbe380-25b7-4d94-a35e-63630fa940f3-secret-volume\") pod \"bddbe380-25b7-4d94-a35e-63630fa940f3\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.402347 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g44vt\" (UniqueName: \"kubernetes.io/projected/bddbe380-25b7-4d94-a35e-63630fa940f3-kube-api-access-g44vt\") pod \"bddbe380-25b7-4d94-a35e-63630fa940f3\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.402406 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bddbe380-25b7-4d94-a35e-63630fa940f3-config-volume\") pod \"bddbe380-25b7-4d94-a35e-63630fa940f3\" (UID: \"bddbe380-25b7-4d94-a35e-63630fa940f3\") " Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.403348 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bddbe380-25b7-4d94-a35e-63630fa940f3-config-volume" (OuterVolumeSpecName: "config-volume") pod "bddbe380-25b7-4d94-a35e-63630fa940f3" (UID: "bddbe380-25b7-4d94-a35e-63630fa940f3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.408217 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bddbe380-25b7-4d94-a35e-63630fa940f3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bddbe380-25b7-4d94-a35e-63630fa940f3" (UID: "bddbe380-25b7-4d94-a35e-63630fa940f3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.408348 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bddbe380-25b7-4d94-a35e-63630fa940f3-kube-api-access-g44vt" (OuterVolumeSpecName: "kube-api-access-g44vt") pod "bddbe380-25b7-4d94-a35e-63630fa940f3" (UID: "bddbe380-25b7-4d94-a35e-63630fa940f3"). InnerVolumeSpecName "kube-api-access-g44vt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.504380 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bddbe380-25b7-4d94-a35e-63630fa940f3-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.504420 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g44vt\" (UniqueName: \"kubernetes.io/projected/bddbe380-25b7-4d94-a35e-63630fa940f3-kube-api-access-g44vt\") on node \"crc\" DevicePath \"\"" Nov 29 06:45:02 crc kubenswrapper[4943]: I1129 06:45:02.504432 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bddbe380-25b7-4d94-a35e-63630fa940f3-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 06:45:03 crc kubenswrapper[4943]: I1129 06:45:03.079214 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng" event={"ID":"bddbe380-25b7-4d94-a35e-63630fa940f3","Type":"ContainerDied","Data":"0f9859d4d06be67f0d91fa20ef2f7a57e07e296ae429893d47616b00b969ca3d"} Nov 29 06:45:03 crc kubenswrapper[4943]: I1129 06:45:03.079262 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f9859d4d06be67f0d91fa20ef2f7a57e07e296ae429893d47616b00b969ca3d" Nov 29 06:45:03 crc kubenswrapper[4943]: I1129 06:45:03.079267 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.277604 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-wc5rn"] Nov 29 06:46:12 crc kubenswrapper[4943]: E1129 06:46:12.278455 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bddbe380-25b7-4d94-a35e-63630fa940f3" containerName="collect-profiles" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.278474 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="bddbe380-25b7-4d94-a35e-63630fa940f3" containerName="collect-profiles" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.278618 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="bddbe380-25b7-4d94-a35e-63630fa940f3" containerName="collect-profiles" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.279118 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-wc5rn" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.281297 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.281500 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.281589 4943 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-mxml5" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.286694 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-x7697"] Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.288207 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-x7697" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.295148 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zfdnj"] Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.296619 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.300273 4943 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-24q59" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.300508 4943 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-nbpnd" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.301394 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-wc5rn"] Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.350303 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-x7697"] Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.350387 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zfdnj"] Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.436509 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgtj9\" (UniqueName: \"kubernetes.io/projected/77f68ba4-9118-4316-9cac-72735ebef023-kube-api-access-cgtj9\") pod \"cert-manager-5b446d88c5-x7697\" (UID: \"77f68ba4-9118-4316-9cac-72735ebef023\") " pod="cert-manager/cert-manager-5b446d88c5-x7697" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.436653 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxc5x\" (UniqueName: \"kubernetes.io/projected/ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c-kube-api-access-nxc5x\") pod \"cert-manager-cainjector-7f985d654d-wc5rn\" (UID: \"ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-wc5rn" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.436810 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgc9g\" (UniqueName: \"kubernetes.io/projected/71f55aa3-b804-4307-96c1-fa7829f7d7d4-kube-api-access-wgc9g\") pod \"cert-manager-webhook-5655c58dd6-zfdnj\" (UID: \"71f55aa3-b804-4307-96c1-fa7829f7d7d4\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.538000 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgc9g\" (UniqueName: \"kubernetes.io/projected/71f55aa3-b804-4307-96c1-fa7829f7d7d4-kube-api-access-wgc9g\") pod \"cert-manager-webhook-5655c58dd6-zfdnj\" (UID: \"71f55aa3-b804-4307-96c1-fa7829f7d7d4\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.538084 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgtj9\" (UniqueName: \"kubernetes.io/projected/77f68ba4-9118-4316-9cac-72735ebef023-kube-api-access-cgtj9\") pod \"cert-manager-5b446d88c5-x7697\" (UID: \"77f68ba4-9118-4316-9cac-72735ebef023\") " pod="cert-manager/cert-manager-5b446d88c5-x7697" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.538133 4943 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxc5x\" (UniqueName: \"kubernetes.io/projected/ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c-kube-api-access-nxc5x\") pod \"cert-manager-cainjector-7f985d654d-wc5rn\" (UID: \"ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-wc5rn" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.563706 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgtj9\" (UniqueName: \"kubernetes.io/projected/77f68ba4-9118-4316-9cac-72735ebef023-kube-api-access-cgtj9\") pod \"cert-manager-5b446d88c5-x7697\" (UID: \"77f68ba4-9118-4316-9cac-72735ebef023\") " pod="cert-manager/cert-manager-5b446d88c5-x7697" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.563785 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxc5x\" (UniqueName: \"kubernetes.io/projected/ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c-kube-api-access-nxc5x\") pod \"cert-manager-cainjector-7f985d654d-wc5rn\" (UID: \"ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-wc5rn" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.565810 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgc9g\" (UniqueName: \"kubernetes.io/projected/71f55aa3-b804-4307-96c1-fa7829f7d7d4-kube-api-access-wgc9g\") pod \"cert-manager-webhook-5655c58dd6-zfdnj\" (UID: \"71f55aa3-b804-4307-96c1-fa7829f7d7d4\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.606502 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-wc5rn" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.619178 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-x7697" Nov 29 06:46:12 crc kubenswrapper[4943]: I1129 06:46:12.629607 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" Nov 29 06:46:13 crc kubenswrapper[4943]: I1129 06:46:13.167207 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-wc5rn"] Nov 29 06:46:13 crc kubenswrapper[4943]: I1129 06:46:13.169995 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-x7697"] Nov 29 06:46:13 crc kubenswrapper[4943]: I1129 06:46:13.177938 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zfdnj"] Nov 29 06:46:13 crc kubenswrapper[4943]: I1129 06:46:13.180416 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 06:46:13 crc kubenswrapper[4943]: W1129 06:46:13.186358 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71f55aa3_b804_4307_96c1_fa7829f7d7d4.slice/crio-cdf82049236dd36860f9ebf3cbb1c0ffc610efda142577122dd91b30e419383c WatchSource:0}: Error finding container cdf82049236dd36860f9ebf3cbb1c0ffc610efda142577122dd91b30e419383c: Status 404 returned error can't find the container with id cdf82049236dd36860f9ebf3cbb1c0ffc610efda142577122dd91b30e419383c Nov 29 06:46:13 crc kubenswrapper[4943]: I1129 06:46:13.473992 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" event={"ID":"71f55aa3-b804-4307-96c1-fa7829f7d7d4","Type":"ContainerStarted","Data":"cdf82049236dd36860f9ebf3cbb1c0ffc610efda142577122dd91b30e419383c"} Nov 29 06:46:13 crc kubenswrapper[4943]: I1129 06:46:13.476384 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-x7697" event={"ID":"77f68ba4-9118-4316-9cac-72735ebef023","Type":"ContainerStarted","Data":"ff365c395dc58fd86643ac630659b1759b678f7ada5ec06c76b09f6b093b2a72"} Nov 29 06:46:13 crc kubenswrapper[4943]: I1129 06:46:13.478061 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-wc5rn" event={"ID":"ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c","Type":"ContainerStarted","Data":"c2b47af0eb44fc883d64a98e1475751210c7795ec4ba57d4caee417f00855fdc"} Nov 29 06:46:16 crc kubenswrapper[4943]: I1129 06:46:16.027779 4943 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.511734 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lrsts"] Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.512433 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovn-controller" containerID="cri-o://4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c" gracePeriod=30 Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.512539 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="nbdb" containerID="cri-o://788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b" gracePeriod=30 Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.512630 4943 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="sbdb" containerID="cri-o://6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156" gracePeriod=30 Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.512606 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kube-rbac-proxy-node" containerID="cri-o://66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198" gracePeriod=30 Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.512655 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="northd" containerID="cri-o://f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8" gracePeriod=30 Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.512539 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18" gracePeriod=30 Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.512624 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovn-acl-logging" containerID="cri-o://e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb" gracePeriod=30 Nov 29 06:46:22 crc kubenswrapper[4943]: I1129 06:46:22.551201 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" containerID="cri-o://f91c2b38743f6430b94e6c36188aa34280ee66f6688f3f0dc8fdce3c572704c2" gracePeriod=30 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.561949 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/2.log" Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.562924 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/1.log" Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.562988 4943 generic.go:334] "Generic (PLEG): container finished" podID="ca406df5-4c80-44b5-9092-4ff17b0b0c72" containerID="5eaaa6b16990ee0172d1ad8e17aa360746d2ac18384845a0a4717f2a0f9fce7d" exitCode=2 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.563099 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerDied","Data":"5eaaa6b16990ee0172d1ad8e17aa360746d2ac18384845a0a4717f2a0f9fce7d"} Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.563147 4943 scope.go:117] "RemoveContainer" containerID="02c2168e8f8ac30911c2f3873daab9711429901d38044b843d813f468914071a" Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.563789 4943 scope.go:117] "RemoveContainer" containerID="5eaaa6b16990ee0172d1ad8e17aa360746d2ac18384845a0a4717f2a0f9fce7d" Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.566599 4943 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/3.log" Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.569319 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-acl-logging/0.log" Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.573129 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-controller/0.log" Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.573725 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="f91c2b38743f6430b94e6c36188aa34280ee66f6688f3f0dc8fdce3c572704c2" exitCode=0 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.573846 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156" exitCode=0 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.573942 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b" exitCode=0 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574041 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8" exitCode=0 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574208 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18" exitCode=0 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574294 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198" exitCode=0 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574382 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb" exitCode=143 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574463 4943 generic.go:334] "Generic (PLEG): container finished" podID="78ac9747-c331-4c4f-af69-5153d05f4097" containerID="4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c" exitCode=143 Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.573840 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"f91c2b38743f6430b94e6c36188aa34280ee66f6688f3f0dc8fdce3c572704c2"} Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574680 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156"} Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574809 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" 
event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b"} Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574896 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8"} Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.574983 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18"} Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.575070 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198"} Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.575178 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb"} Nov 29 06:46:23 crc kubenswrapper[4943]: I1129 06:46:23.575298 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c"} Nov 29 06:46:25 crc kubenswrapper[4943]: I1129 06:46:25.937425 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/3.log" Nov 29 06:46:25 crc kubenswrapper[4943]: I1129 06:46:25.940507 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-acl-logging/0.log" Nov 29 06:46:25 crc kubenswrapper[4943]: I1129 06:46:25.941449 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-controller/0.log" Nov 29 06:46:25 crc kubenswrapper[4943]: I1129 06:46:25.950672 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.004822 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bjqh2"] Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005133 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kubecfg-setup" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005150 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kubecfg-setup" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005160 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovn-acl-logging" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005168 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovn-acl-logging" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005177 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kube-rbac-proxy-node" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005186 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kube-rbac-proxy-node" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005197 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005204 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005215 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="sbdb" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005222 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="sbdb" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005232 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="nbdb" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005239 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="nbdb" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005250 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005258 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005268 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovn-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005275 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovn-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005285 4943 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kube-rbac-proxy-ovn-metrics" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005293 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kube-rbac-proxy-ovn-metrics" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005306 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005313 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005324 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="northd" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005331 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="northd" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005341 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005350 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: E1129 06:46:26.005359 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005366 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005478 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kube-rbac-proxy-node" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005492 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005504 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovn-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005514 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="nbdb" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005524 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="northd" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005532 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005541 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005550 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 
06:46:26.005576 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovnkube-controller" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005590 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="ovn-acl-logging" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005599 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="sbdb" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.005610 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" containerName="kube-rbac-proxy-ovn-metrics" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.007770 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.040959 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-script-lib\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041310 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-systemd-units\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041357 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-node-log\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041383 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-netd\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041408 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-kubelet\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041442 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-netns\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041444 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041468 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-var-lib-openvswitch\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041457 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-node-log" (OuterVolumeSpecName: "node-log") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041465 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041508 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041521 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041492 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-openvswitch\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041497 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041536 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041614 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-systemd\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041648 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-var-lib-cni-networks-ovn-kubernetes\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041671 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-etc-openvswitch\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041724 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-log-socket\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041770 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfg9s\" (UniqueName: \"kubernetes.io/projected/78ac9747-c331-4c4f-af69-5153d05f4097-kube-api-access-qfg9s\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041796 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-ovn\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041830 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-env-overrides\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041852 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-config\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041881 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-slash\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041912 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-ovn-kubernetes\") pod 
\"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041937 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/78ac9747-c331-4c4f-af69-5153d05f4097-ovn-node-metrics-cert\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.041972 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-bin\") pod \"78ac9747-c331-4c4f-af69-5153d05f4097\" (UID: \"78ac9747-c331-4c4f-af69-5153d05f4097\") " Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042264 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042315 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042361 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042387 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-log-socket" (OuterVolumeSpecName: "log-socket") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042410 4943 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042423 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042428 4943 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042462 4943 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042474 4943 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042487 4943 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042497 4943 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042508 4943 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042520 4943 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-node-log\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042528 4943 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042487 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042711 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042285 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-slash" (OuterVolumeSpecName: "host-slash") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042839 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.042896 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.049709 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78ac9747-c331-4c4f-af69-5153d05f4097-kube-api-access-qfg9s" (OuterVolumeSpecName: "kube-api-access-qfg9s") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "kube-api-access-qfg9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.053873 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78ac9747-c331-4c4f-af69-5153d05f4097-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.064958 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "78ac9747-c331-4c4f-af69-5153d05f4097" (UID: "78ac9747-c331-4c4f-af69-5153d05f4097"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.143985 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovn-node-metrics-cert\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144030 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb9dr\" (UniqueName: \"kubernetes.io/projected/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-kube-api-access-pb9dr\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144050 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-var-lib-openvswitch\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144072 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovnkube-config\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144096 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-node-log\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144179 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-cni-bin\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144202 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144253 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovnkube-script-lib\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144287 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-openvswitch\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144311 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-systemd\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144345 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-run-netns\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144368 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-kubelet\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144382 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-env-overrides\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144400 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-slash\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144424 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-run-ovn-kubernetes\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144445 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-log-socket\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144469 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-etc-openvswitch\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144493 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-cni-netd\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144507 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-systemd-units\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144527 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-ovn\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144621 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfg9s\" (UniqueName: \"kubernetes.io/projected/78ac9747-c331-4c4f-af69-5153d05f4097-kube-api-access-qfg9s\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144635 4943 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144645 4943 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/78ac9747-c331-4c4f-af69-5153d05f4097-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144653 4943 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-slash\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144662 4943 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144670 4943 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/78ac9747-c331-4c4f-af69-5153d05f4097-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144678 4943 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144687 4943 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144696 4943 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc 
kubenswrapper[4943]: I1129 06:46:26.144705 4943 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.144713 4943 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/78ac9747-c331-4c4f-af69-5153d05f4097-log-socket\") on node \"crc\" DevicePath \"\"" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.245887 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovn-node-metrics-cert\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.245932 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb9dr\" (UniqueName: \"kubernetes.io/projected/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-kube-api-access-pb9dr\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.245954 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-var-lib-openvswitch\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.245979 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovnkube-config\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246004 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-node-log\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246034 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-cni-bin\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246059 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246085 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovnkube-script-lib\") pod 
\"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246112 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-systemd\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246126 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-openvswitch\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246122 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-node-log\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246166 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-run-netns\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246170 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-var-lib-openvswitch\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246143 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-run-netns\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246204 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-systemd\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246223 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-openvswitch\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246244 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-cni-bin\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc 
kubenswrapper[4943]: I1129 06:46:26.246264 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246366 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-kubelet\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246758 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovnkube-script-lib\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246823 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovnkube-config\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246838 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-kubelet\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246898 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-env-overrides\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246933 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-slash\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246977 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-log-socket\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.246994 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-run-ovn-kubernetes\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247021 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-etc-openvswitch\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247050 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-cni-netd\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247073 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-systemd-units\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247117 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-ovn\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247235 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-run-ovn\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247445 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-slash\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247465 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-log-socket\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247503 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-cni-netd\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247528 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-systemd-units\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247532 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-etc-openvswitch\") pod \"ovnkube-node-bjqh2\" 
(UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247751 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-env-overrides\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.247850 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-host-run-ovn-kubernetes\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.249092 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-ovn-node-metrics-cert\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.261810 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb9dr\" (UniqueName: \"kubernetes.io/projected/b1ba4337-f0a6-4013-8cfd-dc6a757105d1-kube-api-access-pb9dr\") pod \"ovnkube-node-bjqh2\" (UID: \"b1ba4337-f0a6-4013-8cfd-dc6a757105d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.323728 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.595716 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/2.log" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.596555 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerStarted","Data":"31e0200c7d43563045f395af139cf62a9114166dd3debd5be1dd74a3b4e3f78f"} Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.601057 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovnkube-controller/3.log" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.606336 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-acl-logging/0.log" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.607139 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-controller/0.log" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.608003 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" event={"ID":"78ac9747-c331-4c4f-af69-5153d05f4097","Type":"ContainerDied","Data":"f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1"} Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.608075 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lrsts" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.653614 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lrsts"] Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.665193 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lrsts"] Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.677864 4943 scope.go:117] "RemoveContainer" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" Nov 29 06:46:26 crc kubenswrapper[4943]: I1129 06:46:26.708802 4943 scope.go:117] "RemoveContainer" containerID="f91c2b38743f6430b94e6c36188aa34280ee66f6688f3f0dc8fdce3c572704c2" Nov 29 06:46:27 crc kubenswrapper[4943]: W1129 06:46:27.036597 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1ba4337_f0a6_4013_8cfd_dc6a757105d1.slice/crio-4474b655cee1a2885f94beb1d0e09cf8cf851802e1f09eeb692211be2d773a9e WatchSource:0}: Error finding container 4474b655cee1a2885f94beb1d0e09cf8cf851802e1f09eeb692211be2d773a9e: Status 404 returned error can't find the container with id 4474b655cee1a2885f94beb1d0e09cf8cf851802e1f09eeb692211be2d773a9e Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.046532 4943 scope.go:117] "RemoveContainer" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" Nov 29 06:46:27 crc kubenswrapper[4943]: E1129 06:46:27.048287 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\": container with ID starting with 90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5 not found: ID does not exist" containerID="90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.048324 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5"} err="failed to get container status \"90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\": rpc error: code = NotFound desc = could not find container \"90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5\": container with ID starting with 90b9358c791668b114758aa321ee938f6b372c2ef9080cba7936e3c336e0aaa5 not found: ID does not exist" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.048348 4943 scope.go:117] "RemoveContainer" containerID="6d1f290d4b13b3ca6a9bae8448ade6680f093234fd949526b75e11cd31e7b156" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.131180 4943 scope.go:117] "RemoveContainer" containerID="788bd70c01b8c93a5320dcb237cd6d54f2dae6742a3fbedb1648272359e2cc3b" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.229115 4943 scope.go:117] "RemoveContainer" containerID="f237078da4c5ec570bcd72604599da879c6c6a9bba2a6850c58f7f47f2cf7fb8" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.337459 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78ac9747-c331-4c4f-af69-5153d05f4097" path="/var/lib/kubelet/pods/78ac9747-c331-4c4f-af69-5153d05f4097/volumes" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.614082 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" 
event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"4474b655cee1a2885f94beb1d0e09cf8cf851802e1f09eeb692211be2d773a9e"} Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.615918 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/2.log" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.618856 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-acl-logging/0.log" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.619517 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-controller/0.log" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.789136 4943 scope.go:117] "RemoveContainer" containerID="5982fe2322f692f817d494fa558e0d21e4b321ee935b1163591c5aff992d0e18" Nov 29 06:46:27 crc kubenswrapper[4943]: I1129 06:46:27.958530 4943 scope.go:117] "RemoveContainer" containerID="66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198" Nov 29 06:46:28 crc kubenswrapper[4943]: I1129 06:46:28.630081 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-acl-logging/0.log" Nov 29 06:46:28 crc kubenswrapper[4943]: I1129 06:46:28.630711 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-controller/0.log" Nov 29 06:46:28 crc kubenswrapper[4943]: I1129 06:46:28.633844 4943 generic.go:334] "Generic (PLEG): container finished" podID="b1ba4337-f0a6-4013-8cfd-dc6a757105d1" containerID="c556a537be30ed758d5e7138eadf9dddadef3513819b03ac475dfa9f50e9e6de" exitCode=0 Nov 29 06:46:28 crc kubenswrapper[4943]: I1129 06:46:28.633885 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerDied","Data":"c556a537be30ed758d5e7138eadf9dddadef3513819b03ac475dfa9f50e9e6de"} Nov 29 06:46:45 crc kubenswrapper[4943]: I1129 06:46:45.903131 4943 scope.go:117] "RemoveContainer" containerID="66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198" Nov 29 06:46:55 crc kubenswrapper[4943]: I1129 06:46:55.570961 4943 scope.go:117] "RemoveContainer" containerID="e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb" Nov 29 06:46:55 crc kubenswrapper[4943]: E1129 06:46:55.571700 4943 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_kube-rbac-proxy-node_ovnkube-node-lrsts_openshift-ovn-kubernetes_78ac9747-c331-4c4f-af69-5153d05f4097_0 in pod sandbox f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1 from index: no such id: '66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198'" containerID="66741849c9fcfc1b66658375b60003a20a20ee6485349c54df788bd389fb0198" Nov 29 06:46:55 crc kubenswrapper[4943]: E1129 06:46:55.571738 4943 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_kube-rbac-proxy-node_ovnkube-node-lrsts_openshift-ovn-kubernetes_78ac9747-c331-4c4f-af69-5153d05f4097_0 in pod sandbox f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1 from index: no such id: 
Nov 29 06:46:55 crc kubenswrapper[4943]: I1129 06:46:55.571760 4943 scope.go:117] "RemoveContainer" containerID="e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb"
Nov 29 06:46:55 crc kubenswrapper[4943]: I1129 06:46:55.772833 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lrsts_78ac9747-c331-4c4f-af69-5153d05f4097/ovn-controller/0.log"
Nov 29 06:47:02 crc kubenswrapper[4943]: I1129 06:47:02.613956 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:47:02 crc kubenswrapper[4943]: I1129 06:47:02.614295 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.361243 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="quay.io/jetstack/cert-manager-controller:v1.14.4"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.361469 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cert-manager-controller,Image:quay.io/jetstack/cert-manager-controller:v1.14.4,Command:[],Args:[--v=2 --cluster-resource-namespace=$(POD_NAMESPACE) --leader-election-namespace=kube-system --acme-http01-solver-image=quay.io/jetstack/cert-manager-acmesolver:v1.14.4 --max-concurrent-challenges=60],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9402,Protocol:TCP,HostIP:,},ContainerPort{Name:http-healthz,HostPort:0,ContainerPort:9403,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cgtj9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{1 0 http-healthz},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:15,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:8,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000680000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cert-manager-5b446d88c5-x7697_cert-manager(77f68ba4-9118-4316-9cac-72735ebef023): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.362702 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="cert-manager/cert-manager-5b446d88c5-x7697" podUID="77f68ba4-9118-4316-9cac-72735ebef023"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.417153 4943 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_ovn-acl-logging_ovnkube-node-lrsts_openshift-ovn-kubernetes_78ac9747-c331-4c4f-af69-5153d05f4097_0 in pod sandbox f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1: identifier is not a container" containerID="e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.417239 4943 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_ovn-acl-logging_ovnkube-node-lrsts_openshift-ovn-kubernetes_78ac9747-c331-4c4f-af69-5153d05f4097_0 in pod sandbox f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1: identifier is not a container" containerID="e82fdf8d6e67b321371519b1bd9b287e6ed077f5f50ecad5025d646588f450bb"
Nov 29 06:47:03 crc kubenswrapper[4943]: I1129 06:47:03.417269 4943 scope.go:117] "RemoveContainer" containerID="4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c"
Nov 29 06:47:03 crc kubenswrapper[4943]: I1129 06:47:03.417359 4943 scope.go:117] "RemoveContainer" containerID="61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4"
Nov 29 06:47:03 crc kubenswrapper[4943]: I1129 06:47:03.438852 4943 scope.go:117] "RemoveContainer" containerID="4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.445965 4943 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_ovn-controller_ovnkube-node-lrsts_openshift-ovn-kubernetes_78ac9747-c331-4c4f-af69-5153d05f4097_0 in pod sandbox f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1 from index: no such id: '4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c'" containerID="4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.446051 4943 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_ovn-controller_ovnkube-node-lrsts_openshift-ovn-kubernetes_78ac9747-c331-4c4f-af69-5153d05f4097_0 in pod sandbox f142de5121f5650224d7f90e485a5c07991a21b70c6140cebd1c67063004bdf1 from index: no such id: '4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c'" containerID="4f0faf7db4ca758d8f6e65668ea94ac9ef6753bded405d1b95d1ed519a3a332c"
Nov 29 06:47:03 crc kubenswrapper[4943]: I1129 06:47:03.446237 4943 scope.go:117] "RemoveContainer" containerID="61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.447989 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\": container with ID starting with 61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4 not found: ID does not exist" containerID="61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4"
Nov 29 06:47:03 crc kubenswrapper[4943]: I1129 06:47:03.448047 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4"} err="failed to get container status \"61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\": rpc error: code = NotFound desc = could not find container \"61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4\": container with ID starting with 61eadeab31dd7bae29c139c24d09370288eb7a1e32fcf0e33ec52c15f4c645e4 not found: ID does not exist"
Nov 29 06:47:03 crc kubenswrapper[4943]: E1129 06:47:03.816573 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/jetstack/cert-manager-controller:v1.14.4\\\"\"" pod="cert-manager/cert-manager-5b446d88c5-x7697" podUID="77f68ba4-9118-4316-9cac-72735ebef023"
Nov 29 06:47:04 crc kubenswrapper[4943]: E1129 06:47:04.065506 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="quay.io/jetstack/cert-manager-webhook:v1.14.4"
Nov 29 06:47:04 crc kubenswrapper[4943]: E1129 06:47:04.065700 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cert-manager-webhook,Image:quay.io/jetstack/cert-manager-webhook:v1.14.4,Command:[],Args:[--v=2 --secure-port=10250 --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) --dynamic-serving-ca-secret-name=cert-manager-webhook-ca --dynamic-serving-dns-names=cert-manager-webhook --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE) --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:10250,Protocol:TCP,HostIP:,},ContainerPort{Name:healthcheck,HostPort:0,ContainerPort:6080,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wgc9g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 6080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:60,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 6080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000680000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cert-manager-webhook-5655c58dd6-zfdnj_cert-manager(71f55aa3-b804-4307-96c1-fa7829f7d7d4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 06:47:04 crc kubenswrapper[4943]: E1129 06:47:04.066996 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" podUID="71f55aa3-b804-4307-96c1-fa7829f7d7d4"
Nov 29 06:47:04 crc kubenswrapper[4943]: I1129 06:47:04.826272 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"2e96d4029062d655b438da59cf10b0651e325bb6f17039fce968c2b01e6b8cc8"}
Nov 29 06:47:04 crc kubenswrapper[4943]: I1129 06:47:04.826890 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"3a6e92f94d665824a2a49dc3ddea548397285f81857703c026e0611fd82dd12c"}
Nov 29 06:47:04 crc kubenswrapper[4943]: I1129 06:47:04.826907 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"6b1d41b9e5573a3aa4d3990999fc4b0538b4c3619bd81822d41784824ce4a4bf"}
Nov 29 06:47:04 crc kubenswrapper[4943]: I1129 06:47:04.826920 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"37c96ad38c8f8366f6d026c1bf07e574936e23f09a4c1b99d6434027bb2c3956"}
Nov 29 06:47:04 crc kubenswrapper[4943]: I1129 06:47:04.827975 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-wc5rn" event={"ID":"ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c","Type":"ContainerStarted","Data":"b3822affee11cdf219e7e719c7be765cf6cdf477e1970d23bc144542e4693bd0"}
Nov 29 06:47:04 crc kubenswrapper[4943]: E1129 06:47:04.829640 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-webhook\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/jetstack/cert-manager-webhook:v1.14.4\\\"\"" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" podUID="71f55aa3-b804-4307-96c1-fa7829f7d7d4"
Nov 29 06:47:04 crc kubenswrapper[4943]: I1129 06:47:04.865525 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-wc5rn" podStartSLOduration=2.606128562 podStartE2EDuration="52.865499284s" podCreationTimestamp="2025-11-29 06:46:12 +0000 UTC" firstStartedPulling="2025-11-29 06:46:13.180024802 +0000 UTC m=+748.110113555" lastFinishedPulling="2025-11-29 06:47:03.439395524 +0000 UTC m=+798.369484277" observedRunningTime="2025-11-29 06:47:04.860547004 +0000 UTC m=+799.790635767" watchObservedRunningTime="2025-11-29 06:47:04.865499284 +0000 UTC m=+799.795588037"
Nov 29 06:47:05 crc kubenswrapper[4943]: I1129 06:47:05.836209 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"9916bd2d8ee9b4eba70171590a0d91cc6314cc7c5435e6594b164cfc94a73188"}
Nov 29 06:47:05 crc kubenswrapper[4943]: I1129 06:47:05.836265 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"2910d90c30da3ec8fef55d8f3df63bed7e15869e89f5e0bccaa1f877a78a66c1"}
Nov 29 06:47:10 crc kubenswrapper[4943]: I1129 06:47:10.866629 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"56600fe7d8f3a0d558316e578a700a27e98e065cef66fa2787bcc5d9d2815d88"}
Nov 29 06:47:11 crc kubenswrapper[4943]: I1129 06:47:11.880100 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" event={"ID":"b1ba4337-f0a6-4013-8cfd-dc6a757105d1","Type":"ContainerStarted","Data":"a00c366a2d629992b8c6fe7fbc5c1d108e4e96c0bb782d3d970389f78cbec3f1"}
Nov 29 06:47:11 crc kubenswrapper[4943]: I1129 06:47:11.880577 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2"
Nov 29 06:47:11 crc kubenswrapper[4943]: I1129 06:47:11.880620 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2"
Nov 29 06:47:11 crc kubenswrapper[4943]: I1129 06:47:11.904488 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2"
Nov 29 06:47:11 crc kubenswrapper[4943]: I1129 06:47:11.974247 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2" podStartSLOduration=46.974225965 podStartE2EDuration="46.974225965s" podCreationTimestamp="2025-11-29 06:46:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:47:11.970397563 +0000 UTC m=+806.900486326" watchObservedRunningTime="2025-11-29 06:47:11.974225965 +0000 UTC m=+806.904314718"
Nov 29 06:47:12 crc kubenswrapper[4943]: I1129 06:47:12.884339 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2"
Nov 29 06:47:12 crc kubenswrapper[4943]: I1129 06:47:12.936741 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2"
Nov 29 06:47:13 crc kubenswrapper[4943]: I1129 06:47:13.890215 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/3.log"
Nov 29 06:47:13 crc kubenswrapper[4943]: I1129 06:47:13.891106 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/2.log"
Nov 29 06:47:13 crc kubenswrapper[4943]: I1129 06:47:13.891161 4943 generic.go:334] "Generic (PLEG): container finished" podID="ca406df5-4c80-44b5-9092-4ff17b0b0c72" containerID="31e0200c7d43563045f395af139cf62a9114166dd3debd5be1dd74a3b4e3f78f" exitCode=1
Nov 29 06:47:13 crc kubenswrapper[4943]: I1129 06:47:13.891205 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerDied","Data":"31e0200c7d43563045f395af139cf62a9114166dd3debd5be1dd74a3b4e3f78f"}
Nov 29 06:47:13 crc kubenswrapper[4943]: I1129 06:47:13.891273 4943 scope.go:117] "RemoveContainer" containerID="5eaaa6b16990ee0172d1ad8e17aa360746d2ac18384845a0a4717f2a0f9fce7d"
Nov 29 06:47:13 crc kubenswrapper[4943]: I1129 06:47:13.892876 4943 scope.go:117] "RemoveContainer" containerID="31e0200c7d43563045f395af139cf62a9114166dd3debd5be1dd74a3b4e3f78f"
Nov 29 06:47:13 crc kubenswrapper[4943]: E1129 06:47:13.893059 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-kh8qv_openshift-multus(ca406df5-4c80-44b5-9092-4ff17b0b0c72)\"" pod="openshift-multus/multus-kh8qv" podUID="ca406df5-4c80-44b5-9092-4ff17b0b0c72"
Nov 29 06:47:14 crc kubenswrapper[4943]: I1129 06:47:14.900126 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/3.log"
Nov 29 06:47:19 crc kubenswrapper[4943]: I1129 06:47:19.926322 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" event={"ID":"71f55aa3-b804-4307-96c1-fa7829f7d7d4","Type":"ContainerStarted","Data":"55065f23bf7858e69d63b89c14a88a56a30ca428e7dde0952e01e70bbaaa8727"}
Nov 29 06:47:19 crc kubenswrapper[4943]: I1129 06:47:19.927114 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj"
Nov 29 06:47:19 crc kubenswrapper[4943]: I1129 06:47:19.944330 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj" podStartSLOduration=2.054866394 podStartE2EDuration="1m7.944306684s" podCreationTimestamp="2025-11-29 06:46:12 +0000 UTC" firstStartedPulling="2025-11-29 06:46:13.190613426 +0000 UTC m=+748.120702179" lastFinishedPulling="2025-11-29 06:47:19.080053716 +0000 UTC m=+814.010142469" observedRunningTime="2025-11-29 06:47:19.94044866 +0000 UTC m=+814.870537423" watchObservedRunningTime="2025-11-29 06:47:19.944306684 +0000 UTC m=+814.874395457"
Nov 29 06:47:24 crc kubenswrapper[4943]: I1129 06:47:24.327001 4943 scope.go:117] "RemoveContainer" containerID="31e0200c7d43563045f395af139cf62a9114166dd3debd5be1dd74a3b4e3f78f"
Nov 29 06:47:26 crc kubenswrapper[4943]: I1129 06:47:26.350125 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bjqh2"
Nov 29 06:47:26 crc kubenswrapper[4943]: I1129 06:47:26.964414 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kh8qv_ca406df5-4c80-44b5-9092-4ff17b0b0c72/kube-multus/3.log"
Nov 29 06:47:26 crc kubenswrapper[4943]: I1129 06:47:26.964750 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kh8qv" event={"ID":"ca406df5-4c80-44b5-9092-4ff17b0b0c72","Type":"ContainerStarted","Data":"876d3ec777e37adec8f9f4afa7d670d5ce7f38f34ad8f59d45d1df6ef37bb1a6"}
Nov 29 06:47:26 crc kubenswrapper[4943]: I1129 06:47:26.967748 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-x7697" event={"ID":"77f68ba4-9118-4316-9cac-72735ebef023","Type":"ContainerStarted","Data":"571a1092b38917bbf327242c354ce83371331875f1d2a0494507aa6fd1dd79b4"}
Nov 29 06:47:26 crc kubenswrapper[4943]: I1129 06:47:26.996193 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-x7697" podStartSLOduration=2.39771577 podStartE2EDuration="1m14.996176199s" podCreationTimestamp="2025-11-29 06:46:12 +0000 UTC" firstStartedPulling="2025-11-29 06:46:13.180128844 +0000 UTC m=+748.110217597" lastFinishedPulling="2025-11-29 06:47:25.778589273 +0000 UTC m=+820.708678026" observedRunningTime="2025-11-29 06:47:26.990075871 +0000 UTC m=+821.920164634" watchObservedRunningTime="2025-11-29 06:47:26.996176199 +0000 UTC m=+821.926264952"
Nov 29 06:47:27 crc kubenswrapper[4943]: I1129 06:47:27.632229 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-zfdnj"
Nov 29 06:47:32 crc kubenswrapper[4943]: I1129 06:47:32.614116 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:47:32 crc kubenswrapper[4943]: I1129 06:47:32.614860 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:48:02 crc kubenswrapper[4943]: I1129 06:48:02.615061 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:48:02 crc kubenswrapper[4943]: I1129 06:48:02.615741 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:48:02 crc kubenswrapper[4943]: I1129 06:48:02.615803 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7"
Nov 29 06:48:02 crc kubenswrapper[4943]: I1129 06:48:02.616616 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fa03227f4d437f0a53532154ae212e7074c3ffe3db244477095def4be235847f"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 06:48:02 crc kubenswrapper[4943]: I1129 06:48:02.616683 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://fa03227f4d437f0a53532154ae212e7074c3ffe3db244477095def4be235847f" gracePeriod=600
Nov 29 06:48:04 crc kubenswrapper[4943]: I1129 06:48:04.199291 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="fa03227f4d437f0a53532154ae212e7074c3ffe3db244477095def4be235847f" exitCode=0
Nov 29 06:48:04 crc kubenswrapper[4943]: I1129 06:48:04.199386 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"fa03227f4d437f0a53532154ae212e7074c3ffe3db244477095def4be235847f"}
Nov 29 06:48:04 crc kubenswrapper[4943]: I1129 06:48:04.199762 4943 scope.go:117] "RemoveContainer" containerID="4b6d56c48a91c01fc97cfd0d2fb91b2725d9c5c63de568919fc058a8e02a4a2d"
Nov 29 06:48:05 crc kubenswrapper[4943]: I1129 06:48:05.207779 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"b2b3367625bf6bfa5e7fcdc538fe79e620be10e5e50d271ba5c28897d9e4459e"}
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.133902 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"]
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.137284 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.140835 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.151167 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"]
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.221866 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.224438 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.224687 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9qtj\" (UniqueName: \"kubernetes.io/projected/cfb30518-7399-46a6-8755-379d920dfa8a-kube-api-access-n9qtj\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.326232 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.326825 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.326759 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"
Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.326914 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9qtj\" (UniqueName:
\"kubernetes.io/projected/cfb30518-7399-46a6-8755-379d920dfa8a-kube-api-access-n9qtj\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.327164 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.350334 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9qtj\" (UniqueName: \"kubernetes.io/projected/cfb30518-7399-46a6-8755-379d920dfa8a-kube-api-access-n9qtj\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.460001 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" Nov 29 06:48:15 crc kubenswrapper[4943]: I1129 06:48:15.866098 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf"] Nov 29 06:48:16 crc kubenswrapper[4943]: I1129 06:48:16.264266 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" event={"ID":"cfb30518-7399-46a6-8755-379d920dfa8a","Type":"ContainerStarted","Data":"57f5e81d5a88c2dded490a41846e3d4a6497aa0af4799bc2da44cd599819f842"} Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.186530 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-m695b"] Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.188762 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.195004 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m695b"] Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.256060 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2hb2\" (UniqueName: \"kubernetes.io/projected/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-kube-api-access-r2hb2\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.256136 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-catalog-content\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.256218 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-utilities\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.357342 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-utilities\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.357402 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2hb2\" (UniqueName: \"kubernetes.io/projected/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-kube-api-access-r2hb2\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.357434 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-catalog-content\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.357925 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-catalog-content\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.358202 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-utilities\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.379252 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-r2hb2\" (UniqueName: \"kubernetes.io/projected/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-kube-api-access-r2hb2\") pod \"redhat-operators-m695b\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.512180 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:48:17 crc kubenswrapper[4943]: I1129 06:48:17.728348 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m695b"] Nov 29 06:48:17 crc kubenswrapper[4943]: W1129 06:48:17.740679 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3bc3a154_0ead_471a_8cb9_0bb06898b6e2.slice/crio-c6006405ef7e6714af3e5bed087b1260121445ee7359d4cc3e45b8902d75e1da WatchSource:0}: Error finding container c6006405ef7e6714af3e5bed087b1260121445ee7359d4cc3e45b8902d75e1da: Status 404 returned error can't find the container with id c6006405ef7e6714af3e5bed087b1260121445ee7359d4cc3e45b8902d75e1da Nov 29 06:48:18 crc kubenswrapper[4943]: I1129 06:48:18.276748 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m695b" event={"ID":"3bc3a154-0ead-471a-8cb9-0bb06898b6e2","Type":"ContainerStarted","Data":"c6006405ef7e6714af3e5bed087b1260121445ee7359d4cc3e45b8902d75e1da"} Nov 29 06:48:28 crc kubenswrapper[4943]: I1129 06:48:28.335669 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" event={"ID":"cfb30518-7399-46a6-8755-379d920dfa8a","Type":"ContainerStarted","Data":"07b5a5423920f3e71bc5209de5cf1ea1227978d6d7d50ca15d4c1f80141330c5"} Nov 29 06:48:32 crc kubenswrapper[4943]: I1129 06:48:32.361787 4943 generic.go:334] "Generic (PLEG): container finished" podID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerID="8a85d52fcaafe97652a779b6ad10fb4c4cd4e37d60b606fafbd29bc81e5a5cc9" exitCode=0 Nov 29 06:48:32 crc kubenswrapper[4943]: I1129 06:48:32.361900 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m695b" event={"ID":"3bc3a154-0ead-471a-8cb9-0bb06898b6e2","Type":"ContainerDied","Data":"8a85d52fcaafe97652a779b6ad10fb4c4cd4e37d60b606fafbd29bc81e5a5cc9"} Nov 29 06:48:34 crc kubenswrapper[4943]: I1129 06:48:33.369294 4943 generic.go:334] "Generic (PLEG): container finished" podID="cfb30518-7399-46a6-8755-379d920dfa8a" containerID="07b5a5423920f3e71bc5209de5cf1ea1227978d6d7d50ca15d4c1f80141330c5" exitCode=0 Nov 29 06:48:34 crc kubenswrapper[4943]: I1129 06:48:33.369395 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" event={"ID":"cfb30518-7399-46a6-8755-379d920dfa8a","Type":"ContainerDied","Data":"07b5a5423920f3e71bc5209de5cf1ea1227978d6d7d50ca15d4c1f80141330c5"} Nov 29 06:48:42 crc kubenswrapper[4943]: I1129 06:48:42.429792 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m695b" event={"ID":"3bc3a154-0ead-471a-8cb9-0bb06898b6e2","Type":"ContainerStarted","Data":"270930f871ee4bdc06a23507b42eb0921888c0cfee794ce011a2c6622b7d7849"} Nov 29 06:48:44 crc kubenswrapper[4943]: I1129 06:48:44.455608 4943 generic.go:334] "Generic (PLEG): container finished" podID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" 
containerID="270930f871ee4bdc06a23507b42eb0921888c0cfee794ce011a2c6622b7d7849" exitCode=0 Nov 29 06:48:44 crc kubenswrapper[4943]: I1129 06:48:44.455704 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m695b" event={"ID":"3bc3a154-0ead-471a-8cb9-0bb06898b6e2","Type":"ContainerDied","Data":"270930f871ee4bdc06a23507b42eb0921888c0cfee794ce011a2c6622b7d7849"} Nov 29 06:49:04 crc kubenswrapper[4943]: I1129 06:49:04.574892 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m695b" event={"ID":"3bc3a154-0ead-471a-8cb9-0bb06898b6e2","Type":"ContainerStarted","Data":"78082da2d7ed0fa94031984a016e856871842eb62db679c709fc4b37ac487bfb"} Nov 29 06:49:04 crc kubenswrapper[4943]: I1129 06:49:04.579681 4943 generic.go:334] "Generic (PLEG): container finished" podID="cfb30518-7399-46a6-8755-379d920dfa8a" containerID="c34c056355167d0f8a6cd27426341cc3ca4549334ed9a269bcfd233b1f735c27" exitCode=0 Nov 29 06:49:04 crc kubenswrapper[4943]: I1129 06:49:04.579736 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" event={"ID":"cfb30518-7399-46a6-8755-379d920dfa8a","Type":"ContainerDied","Data":"c34c056355167d0f8a6cd27426341cc3ca4549334ed9a269bcfd233b1f735c27"} Nov 29 06:49:04 crc kubenswrapper[4943]: I1129 06:49:04.614834 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-m695b" podStartSLOduration=18.203118045 podStartE2EDuration="47.614803777s" podCreationTimestamp="2025-11-29 06:48:17 +0000 UTC" firstStartedPulling="2025-11-29 06:48:33.371092414 +0000 UTC m=+888.301181167" lastFinishedPulling="2025-11-29 06:49:02.782778146 +0000 UTC m=+917.712866899" observedRunningTime="2025-11-29 06:49:04.600951939 +0000 UTC m=+919.531040692" watchObservedRunningTime="2025-11-29 06:49:04.614803777 +0000 UTC m=+919.544892530" Nov 29 06:49:05 crc kubenswrapper[4943]: I1129 06:49:05.591690 4943 generic.go:334] "Generic (PLEG): container finished" podID="cfb30518-7399-46a6-8755-379d920dfa8a" containerID="65680f983ca2bdd25a3cc45c3508333712e2798cb7cfcb4bfcb3bd5549be09e7" exitCode=0 Nov 29 06:49:05 crc kubenswrapper[4943]: I1129 06:49:05.591821 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" event={"ID":"cfb30518-7399-46a6-8755-379d920dfa8a","Type":"ContainerDied","Data":"65680f983ca2bdd25a3cc45c3508333712e2798cb7cfcb4bfcb3bd5549be09e7"} Nov 29 06:49:06 crc kubenswrapper[4943]: I1129 06:49:06.862067 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" Nov 29 06:49:06 crc kubenswrapper[4943]: I1129 06:49:06.946261 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9qtj\" (UniqueName: \"kubernetes.io/projected/cfb30518-7399-46a6-8755-379d920dfa8a-kube-api-access-n9qtj\") pod \"cfb30518-7399-46a6-8755-379d920dfa8a\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " Nov 29 06:49:06 crc kubenswrapper[4943]: I1129 06:49:06.946331 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-util\") pod \"cfb30518-7399-46a6-8755-379d920dfa8a\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " Nov 29 06:49:06 crc kubenswrapper[4943]: I1129 06:49:06.946382 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-bundle\") pod \"cfb30518-7399-46a6-8755-379d920dfa8a\" (UID: \"cfb30518-7399-46a6-8755-379d920dfa8a\") " Nov 29 06:49:06 crc kubenswrapper[4943]: I1129 06:49:06.947238 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-bundle" (OuterVolumeSpecName: "bundle") pod "cfb30518-7399-46a6-8755-379d920dfa8a" (UID: "cfb30518-7399-46a6-8755-379d920dfa8a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:49:06 crc kubenswrapper[4943]: I1129 06:49:06.954899 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfb30518-7399-46a6-8755-379d920dfa8a-kube-api-access-n9qtj" (OuterVolumeSpecName: "kube-api-access-n9qtj") pod "cfb30518-7399-46a6-8755-379d920dfa8a" (UID: "cfb30518-7399-46a6-8755-379d920dfa8a"). InnerVolumeSpecName "kube-api-access-n9qtj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:49:06 crc kubenswrapper[4943]: I1129 06:49:06.966493 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-util" (OuterVolumeSpecName: "util") pod "cfb30518-7399-46a6-8755-379d920dfa8a" (UID: "cfb30518-7399-46a6-8755-379d920dfa8a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:49:07 crc kubenswrapper[4943]: I1129 06:49:07.047636 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9qtj\" (UniqueName: \"kubernetes.io/projected/cfb30518-7399-46a6-8755-379d920dfa8a-kube-api-access-n9qtj\") on node \"crc\" DevicePath \"\"" Nov 29 06:49:07 crc kubenswrapper[4943]: I1129 06:49:07.047684 4943 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-util\") on node \"crc\" DevicePath \"\"" Nov 29 06:49:07 crc kubenswrapper[4943]: I1129 06:49:07.047699 4943 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cfb30518-7399-46a6-8755-379d920dfa8a-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:49:07 crc kubenswrapper[4943]: I1129 06:49:07.512702 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:49:07 crc kubenswrapper[4943]: I1129 06:49:07.512781 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:49:07 crc kubenswrapper[4943]: I1129 06:49:07.605269 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" event={"ID":"cfb30518-7399-46a6-8755-379d920dfa8a","Type":"ContainerDied","Data":"57f5e81d5a88c2dded490a41846e3d4a6497aa0af4799bc2da44cd599819f842"} Nov 29 06:49:07 crc kubenswrapper[4943]: I1129 06:49:07.605319 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57f5e81d5a88c2dded490a41846e3d4a6497aa0af4799bc2da44cd599819f842" Nov 29 06:49:07 crc kubenswrapper[4943]: I1129 06:49:07.605401 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf" Nov 29 06:49:08 crc kubenswrapper[4943]: I1129 06:49:08.570726 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-m695b" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="registry-server" probeResult="failure" output=< Nov 29 06:49:08 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 06:49:08 crc kubenswrapper[4943]: > Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.795818 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb"] Nov 29 06:49:11 crc kubenswrapper[4943]: E1129 06:49:11.796387 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfb30518-7399-46a6-8755-379d920dfa8a" containerName="pull" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.796405 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfb30518-7399-46a6-8755-379d920dfa8a" containerName="pull" Nov 29 06:49:11 crc kubenswrapper[4943]: E1129 06:49:11.796418 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfb30518-7399-46a6-8755-379d920dfa8a" containerName="extract" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.796426 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfb30518-7399-46a6-8755-379d920dfa8a" containerName="extract" Nov 29 06:49:11 crc kubenswrapper[4943]: E1129 06:49:11.796442 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfb30518-7399-46a6-8755-379d920dfa8a" containerName="util" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.796453 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfb30518-7399-46a6-8755-379d920dfa8a" containerName="util" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.796596 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfb30518-7399-46a6-8755-379d920dfa8a" containerName="extract" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.797092 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.798978 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-59v7c" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.799091 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.799145 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.815748 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb"] Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.817250 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6g6p\" (UniqueName: \"kubernetes.io/projected/06fa1be5-8c71-42da-83ab-0e436d55137b-kube-api-access-m6g6p\") pod \"nmstate-operator-5b5b58f5c8-8dprb\" (UID: \"06fa1be5-8c71-42da-83ab-0e436d55137b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.918728 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6g6p\" (UniqueName: \"kubernetes.io/projected/06fa1be5-8c71-42da-83ab-0e436d55137b-kube-api-access-m6g6p\") pod \"nmstate-operator-5b5b58f5c8-8dprb\" (UID: \"06fa1be5-8c71-42da-83ab-0e436d55137b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" Nov 29 06:49:11 crc kubenswrapper[4943]: I1129 06:49:11.943190 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6g6p\" (UniqueName: \"kubernetes.io/projected/06fa1be5-8c71-42da-83ab-0e436d55137b-kube-api-access-m6g6p\") pod \"nmstate-operator-5b5b58f5c8-8dprb\" (UID: \"06fa1be5-8c71-42da-83ab-0e436d55137b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" Nov 29 06:49:12 crc kubenswrapper[4943]: I1129 06:49:12.112879 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" Nov 29 06:49:12 crc kubenswrapper[4943]: I1129 06:49:12.555877 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb"] Nov 29 06:49:12 crc kubenswrapper[4943]: I1129 06:49:12.635948 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" event={"ID":"06fa1be5-8c71-42da-83ab-0e436d55137b","Type":"ContainerStarted","Data":"8585c764a7f34ff27aff601d21d49074f70587372a2022a388ad94522ee1ef33"} Nov 29 06:49:17 crc kubenswrapper[4943]: I1129 06:49:17.559714 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:49:17 crc kubenswrapper[4943]: I1129 06:49:17.598806 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:49:19 crc kubenswrapper[4943]: I1129 06:49:19.917139 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m695b"] Nov 29 06:49:19 crc kubenswrapper[4943]: I1129 06:49:19.917366 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-m695b" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="registry-server" containerID="cri-o://78082da2d7ed0fa94031984a016e856871842eb62db679c709fc4b37ac487bfb" gracePeriod=2 Nov 29 06:49:24 crc kubenswrapper[4943]: I1129 06:49:24.711756 4943 generic.go:334] "Generic (PLEG): container finished" podID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerID="78082da2d7ed0fa94031984a016e856871842eb62db679c709fc4b37ac487bfb" exitCode=0 Nov 29 06:49:24 crc kubenswrapper[4943]: I1129 06:49:24.711832 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m695b" event={"ID":"3bc3a154-0ead-471a-8cb9-0bb06898b6e2","Type":"ContainerDied","Data":"78082da2d7ed0fa94031984a016e856871842eb62db679c709fc4b37ac487bfb"} Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.066866 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.149626 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-utilities\") pod \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.149738 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2hb2\" (UniqueName: \"kubernetes.io/projected/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-kube-api-access-r2hb2\") pod \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.149794 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-catalog-content\") pod \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\" (UID: \"3bc3a154-0ead-471a-8cb9-0bb06898b6e2\") " Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.151102 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-utilities" (OuterVolumeSpecName: "utilities") pod "3bc3a154-0ead-471a-8cb9-0bb06898b6e2" (UID: "3bc3a154-0ead-471a-8cb9-0bb06898b6e2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.155716 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-kube-api-access-r2hb2" (OuterVolumeSpecName: "kube-api-access-r2hb2") pod "3bc3a154-0ead-471a-8cb9-0bb06898b6e2" (UID: "3bc3a154-0ead-471a-8cb9-0bb06898b6e2"). InnerVolumeSpecName "kube-api-access-r2hb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.251583 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2hb2\" (UniqueName: \"kubernetes.io/projected/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-kube-api-access-r2hb2\") on node \"crc\" DevicePath \"\"" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.251616 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.258862 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3bc3a154-0ead-471a-8cb9-0bb06898b6e2" (UID: "3bc3a154-0ead-471a-8cb9-0bb06898b6e2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.352380 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc3a154-0ead-471a-8cb9-0bb06898b6e2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.729799 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m695b" event={"ID":"3bc3a154-0ead-471a-8cb9-0bb06898b6e2","Type":"ContainerDied","Data":"c6006405ef7e6714af3e5bed087b1260121445ee7359d4cc3e45b8902d75e1da"} Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.729866 4943 scope.go:117] "RemoveContainer" containerID="78082da2d7ed0fa94031984a016e856871842eb62db679c709fc4b37ac487bfb" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.729900 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m695b" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.745863 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m695b"] Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.748592 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-m695b"] Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.753045 4943 scope.go:117] "RemoveContainer" containerID="270930f871ee4bdc06a23507b42eb0921888c0cfee794ce011a2c6622b7d7849" Nov 29 06:49:27 crc kubenswrapper[4943]: I1129 06:49:27.770774 4943 scope.go:117] "RemoveContainer" containerID="8a85d52fcaafe97652a779b6ad10fb4c4cd4e37d60b606fafbd29bc81e5a5cc9" Nov 29 06:49:29 crc kubenswrapper[4943]: I1129 06:49:29.337619 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" path="/var/lib/kubelet/pods/3bc3a154-0ead-471a-8cb9-0bb06898b6e2/volumes" Nov 29 06:49:30 crc kubenswrapper[4943]: E1129 06:49:30.202698 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:dd89e08ed6257597e99b1243839d5c76e6bad72fe9e168c0eba5ce9c449189cf" Nov 29 06:49:30 crc kubenswrapper[4943]: E1129 06:49:30.202873 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:nmstate-operator,Image:registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:dd89e08ed6257597e99b1243839d5c76e6bad72fe9e168c0eba5ce9c449189cf,Command:[manager],Args:[--zap-time-encoding=iso8601],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:WATCH_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.annotations['olm.targetNamespaces'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:OPERATOR_NAME,Value:kubernetes-nmstate-operator,ValueFrom:nil,},EnvVar{Name:ENABLE_PROFILER,Value:False,ValueFrom:nil,},EnvVar{Name:PROFILER_PORT,Value:6060,ValueFrom:nil,},EnvVar{Name:RUN_OPERATOR,Value:,ValueFrom:nil,},EnvVar{Name:HANDLER_IMAGE,Value:registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8045b3d5059cc81bf37964d359055dea9e4915c83f3eec4f800d5ce294c06f97,ValueFrom:nil,},EnvVar{Name:PLUGIN_IMAGE,Value:registry.redhat.io/openshift4/nmstate-console-plugin-rhel9@sha256:10fe26b1ef17d6fa13d22976b553b935f1cc14e74b8dd14a31306554aff7c513,ValueFrom:nil,},EnvVar{Name:HANDLER_IMAGE_PULL_POLICY,Value:Always,ValueFrom:nil,},EnvVar{Name:HANDLER_NAMESPACE,Value:openshift-nmstate,ValueFrom:nil,},EnvVar{Name:MONITORING_NAMESPACE,Value:openshift-monitoring,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:23ad174e653d608ec2285f670d8669dbe8bb433f7c215bdb59f5c6ac6ad1bcc9,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:kubernetes-nmstate-operator.4.18.0-202511191213,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{60 -3} {} 60m DecimalSI},memory: {{31457280 0} {} 30Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m6g6p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000690000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nmstate-operator-5b5b58f5c8-8dprb_openshift-nmstate(06fa1be5-8c71-42da-83ab-0e436d55137b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 06:49:30 crc kubenswrapper[4943]: E1129 06:49:30.204056 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" podUID="06fa1be5-8c71-42da-83ab-0e436d55137b" Nov 29 06:49:30 crc kubenswrapper[4943]: E1129 06:49:30.747230 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-operator\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:dd89e08ed6257597e99b1243839d5c76e6bad72fe9e168c0eba5ce9c449189cf\\\"\"" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" podUID="06fa1be5-8c71-42da-83ab-0e436d55137b" Nov 29 06:49:37 crc kubenswrapper[4943]: I1129 06:49:37.961918 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4r826"] Nov 29 06:49:37 crc kubenswrapper[4943]: E1129 06:49:37.963378 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="extract-content" Nov 29 06:49:37 crc kubenswrapper[4943]: I1129 06:49:37.963398 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="extract-content" Nov 29 06:49:37 crc kubenswrapper[4943]: E1129 06:49:37.963409 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="extract-utilities" Nov 29 06:49:37 crc kubenswrapper[4943]: I1129 06:49:37.963416 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="extract-utilities" Nov 29 06:49:37 crc kubenswrapper[4943]: E1129 06:49:37.963430 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="registry-server" Nov 29 06:49:37 crc kubenswrapper[4943]: I1129 06:49:37.963436 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="registry-server" Nov 29 06:49:37 crc kubenswrapper[4943]: I1129 06:49:37.963607 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bc3a154-0ead-471a-8cb9-0bb06898b6e2" containerName="registry-server" Nov 29 06:49:37 crc kubenswrapper[4943]: I1129 06:49:37.964619 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:37 crc kubenswrapper[4943]: I1129 06:49:37.975500 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4r826"] Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.119587 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-utilities\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.119692 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9b42\" (UniqueName: \"kubernetes.io/projected/dca52725-b0d8-4747-8277-5e4a52465b7e-kube-api-access-x9b42\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.119887 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-catalog-content\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.221475 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-utilities\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.221591 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9b42\" (UniqueName: \"kubernetes.io/projected/dca52725-b0d8-4747-8277-5e4a52465b7e-kube-api-access-x9b42\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.221650 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-catalog-content\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.222122 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-catalog-content\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.222345 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-utilities\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.241010 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-x9b42\" (UniqueName: \"kubernetes.io/projected/dca52725-b0d8-4747-8277-5e4a52465b7e-kube-api-access-x9b42\") pod \"redhat-marketplace-4r826\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.295881 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.523244 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4r826"] Nov 29 06:49:38 crc kubenswrapper[4943]: I1129 06:49:38.791581 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4r826" event={"ID":"dca52725-b0d8-4747-8277-5e4a52465b7e","Type":"ContainerStarted","Data":"f16a48389751bd73a3b686ae2aaf7172559121e98335eba7e322838d38309f1a"} Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.351236 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wfpq8"] Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.352534 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.366759 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wfpq8"] Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.461483 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-utilities\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.462174 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkbft\" (UniqueName: \"kubernetes.io/projected/72c4f76f-b84f-41f7-b8df-047ba694c94b-kube-api-access-xkbft\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.462229 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-catalog-content\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.563654 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-utilities\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.563725 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkbft\" (UniqueName: \"kubernetes.io/projected/72c4f76f-b84f-41f7-b8df-047ba694c94b-kube-api-access-xkbft\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 
29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.563751 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-catalog-content\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.564401 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-utilities\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.564415 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-catalog-content\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.584094 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkbft\" (UniqueName: \"kubernetes.io/projected/72c4f76f-b84f-41f7-b8df-047ba694c94b-kube-api-access-xkbft\") pod \"certified-operators-wfpq8\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.679650 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:49:41 crc kubenswrapper[4943]: I1129 06:49:41.949161 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wfpq8"] Nov 29 06:49:42 crc kubenswrapper[4943]: I1129 06:49:42.817137 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wfpq8" event={"ID":"72c4f76f-b84f-41f7-b8df-047ba694c94b","Type":"ContainerStarted","Data":"9bb894e03507c4121c074f6c1d0e72055bfbc34c3bdc9aeb51327763e07ec8b8"} Nov 29 06:49:43 crc kubenswrapper[4943]: I1129 06:49:43.824239 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4r826" event={"ID":"dca52725-b0d8-4747-8277-5e4a52465b7e","Type":"ContainerStarted","Data":"464f8e622bc989c4f6bff31b21b19d1fd275d6faaf681d03d6077c54c941ee77"} Nov 29 06:49:52 crc kubenswrapper[4943]: I1129 06:49:52.884945 4943 generic.go:334] "Generic (PLEG): container finished" podID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerID="464f8e622bc989c4f6bff31b21b19d1fd275d6faaf681d03d6077c54c941ee77" exitCode=0 Nov 29 06:49:52 crc kubenswrapper[4943]: I1129 06:49:52.885246 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4r826" event={"ID":"dca52725-b0d8-4747-8277-5e4a52465b7e","Type":"ContainerDied","Data":"464f8e622bc989c4f6bff31b21b19d1fd275d6faaf681d03d6077c54c941ee77"} Nov 29 06:49:55 crc kubenswrapper[4943]: I1129 06:49:55.901210 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wfpq8" event={"ID":"72c4f76f-b84f-41f7-b8df-047ba694c94b","Type":"ContainerStarted","Data":"efaad2aa3c194cd81c62c9606bbfb60bdc7e2e95afa291816fd1497d53ad75dd"} Nov 29 06:49:56 crc kubenswrapper[4943]: I1129 06:49:56.908921 4943 
generic.go:334] "Generic (PLEG): container finished" podID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerID="efaad2aa3c194cd81c62c9606bbfb60bdc7e2e95afa291816fd1497d53ad75dd" exitCode=0 Nov 29 06:49:56 crc kubenswrapper[4943]: I1129 06:49:56.908971 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wfpq8" event={"ID":"72c4f76f-b84f-41f7-b8df-047ba694c94b","Type":"ContainerDied","Data":"efaad2aa3c194cd81c62c9606bbfb60bdc7e2e95afa291816fd1497d53ad75dd"} Nov 29 06:50:31 crc kubenswrapper[4943]: I1129 06:50:31.094047 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" event={"ID":"06fa1be5-8c71-42da-83ab-0e436d55137b","Type":"ContainerStarted","Data":"ac858ecf07c551e692ead618ce3da129bba8f83bd9e82fb34a009058fc60ceac"} Nov 29 06:50:31 crc kubenswrapper[4943]: I1129 06:50:31.096939 4943 generic.go:334] "Generic (PLEG): container finished" podID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerID="d1fa560f5c6695efa79c2e9275e0df90ce767af1afeabff037fac5b319b1638d" exitCode=0 Nov 29 06:50:31 crc kubenswrapper[4943]: I1129 06:50:31.096978 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4r826" event={"ID":"dca52725-b0d8-4747-8277-5e4a52465b7e","Type":"ContainerDied","Data":"d1fa560f5c6695efa79c2e9275e0df90ce767af1afeabff037fac5b319b1638d"} Nov 29 06:50:32 crc kubenswrapper[4943]: I1129 06:50:32.613409 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 06:50:32 crc kubenswrapper[4943]: I1129 06:50:32.613472 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 06:50:34 crc kubenswrapper[4943]: I1129 06:50:34.133781 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-8dprb" podStartSLOduration=8.225388069 podStartE2EDuration="1m23.133763719s" podCreationTimestamp="2025-11-29 06:49:11 +0000 UTC" firstStartedPulling="2025-11-29 06:49:12.564415687 +0000 UTC m=+927.494504440" lastFinishedPulling="2025-11-29 06:50:27.472791337 +0000 UTC m=+1002.402880090" observedRunningTime="2025-11-29 06:50:34.129680272 +0000 UTC m=+1009.059769045" watchObservedRunningTime="2025-11-29 06:50:34.133763719 +0000 UTC m=+1009.063852472" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.242205 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.243430 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.245311 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-c6q8r" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.251047 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.251817 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.253082 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.258166 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.270247 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.275446 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-kf6kc"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.276323 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.436080 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpqxd\" (UniqueName: \"kubernetes.io/projected/b458e00f-1442-452e-9588-5c4b822e1bf8-kube-api-access-kpqxd\") pod \"nmstate-webhook-5f6d4c5ccb-7xj8x\" (UID: \"b458e00f-1442-452e-9588-5c4b822e1bf8\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.436156 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b458e00f-1442-452e-9588-5c4b822e1bf8-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-7xj8x\" (UID: \"b458e00f-1442-452e-9588-5c4b822e1bf8\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.436194 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcrv8\" (UniqueName: \"kubernetes.io/projected/83d362f9-3c5d-4ee3-98fd-dea1eec92b79-kube-api-access-mcrv8\") pod \"nmstate-metrics-7f946cbc9-nmd65\" (UID: \"83d362f9-3c5d-4ee3-98fd-dea1eec92b79\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.436259 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-ovs-socket\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.436287 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-nmstate-lock\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " 
pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.436354 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-dbus-socket\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.436375 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9f6b\" (UniqueName: \"kubernetes.io/projected/32aeaef6-5f98-42e2-97c3-65c6494f256c-kube-api-access-f9f6b\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.443425 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.444160 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.446031 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.446657 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-bcqps" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.447161 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.454609 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.537413 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-dbus-socket\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.537468 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9f6b\" (UniqueName: \"kubernetes.io/projected/32aeaef6-5f98-42e2-97c3-65c6494f256c-kube-api-access-f9f6b\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.537497 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpqxd\" (UniqueName: \"kubernetes.io/projected/b458e00f-1442-452e-9588-5c4b822e1bf8-kube-api-access-kpqxd\") pod \"nmstate-webhook-5f6d4c5ccb-7xj8x\" (UID: \"b458e00f-1442-452e-9588-5c4b822e1bf8\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.537538 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b458e00f-1442-452e-9588-5c4b822e1bf8-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-7xj8x\" (UID: \"b458e00f-1442-452e-9588-5c4b822e1bf8\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:35 crc 
kubenswrapper[4943]: I1129 06:50:35.537583 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcrv8\" (UniqueName: \"kubernetes.io/projected/83d362f9-3c5d-4ee3-98fd-dea1eec92b79-kube-api-access-mcrv8\") pod \"nmstate-metrics-7f946cbc9-nmd65\" (UID: \"83d362f9-3c5d-4ee3-98fd-dea1eec92b79\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.537631 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-ovs-socket\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.537701 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-nmstate-lock\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.537784 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-nmstate-lock\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: E1129 06:50:35.538216 4943 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.538230 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-ovs-socket\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: E1129 06:50:35.538365 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b458e00f-1442-452e-9588-5c4b822e1bf8-tls-key-pair podName:b458e00f-1442-452e-9588-5c4b822e1bf8 nodeName:}" failed. No retries permitted until 2025-11-29 06:50:36.038342519 +0000 UTC m=+1010.968431332 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/b458e00f-1442-452e-9588-5c4b822e1bf8-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-7xj8x" (UID: "b458e00f-1442-452e-9588-5c4b822e1bf8") : secret "openshift-nmstate-webhook" not found Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.538465 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/32aeaef6-5f98-42e2-97c3-65c6494f256c-dbus-socket\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.560514 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9f6b\" (UniqueName: \"kubernetes.io/projected/32aeaef6-5f98-42e2-97c3-65c6494f256c-kube-api-access-f9f6b\") pod \"nmstate-handler-kf6kc\" (UID: \"32aeaef6-5f98-42e2-97c3-65c6494f256c\") " pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.563463 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcrv8\" (UniqueName: \"kubernetes.io/projected/83d362f9-3c5d-4ee3-98fd-dea1eec92b79-kube-api-access-mcrv8\") pod \"nmstate-metrics-7f946cbc9-nmd65\" (UID: \"83d362f9-3c5d-4ee3-98fd-dea1eec92b79\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.569912 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpqxd\" (UniqueName: \"kubernetes.io/projected/b458e00f-1442-452e-9588-5c4b822e1bf8-kube-api-access-kpqxd\") pod \"nmstate-webhook-5f6d4c5ccb-7xj8x\" (UID: \"b458e00f-1442-452e-9588-5c4b822e1bf8\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.621099 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-84f9d695fc-h29wl"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.621805 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.638615 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-459nd\" (UniqueName: \"kubernetes.io/projected/4f1e28f0-85d9-4d51-900a-33ab52c3d087-kube-api-access-459nd\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.638657 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4f1e28f0-85d9-4d51-900a-33ab52c3d087-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.638683 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4f1e28f0-85d9-4d51-900a-33ab52c3d087-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.644854 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-84f9d695fc-h29wl"] Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.652814 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.739586 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75f9s\" (UniqueName: \"kubernetes.io/projected/436dbf02-a82a-4ce6-ab27-a0bc92683d22-kube-api-access-75f9s\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.739632 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-service-ca\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.739652 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-config\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.739740 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-trusted-ca-bundle\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.739772 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-serving-cert\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.739931 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-459nd\" (UniqueName: \"kubernetes.io/projected/4f1e28f0-85d9-4d51-900a-33ab52c3d087-kube-api-access-459nd\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.740422 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-oauth-config\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.740502 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4f1e28f0-85d9-4d51-900a-33ab52c3d087-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.740971 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-oauth-serving-cert\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.741128 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4f1e28f0-85d9-4d51-900a-33ab52c3d087-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: E1129 06:50:35.741331 4943 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 29 06:50:35 crc kubenswrapper[4943]: E1129 06:50:35.741420 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4f1e28f0-85d9-4d51-900a-33ab52c3d087-plugin-serving-cert podName:4f1e28f0-85d9-4d51-900a-33ab52c3d087 nodeName:}" failed. No retries permitted until 2025-11-29 06:50:36.241402027 +0000 UTC m=+1011.171490780 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/4f1e28f0-85d9-4d51-900a-33ab52c3d087-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-fsz5q" (UID: "4f1e28f0-85d9-4d51-900a-33ab52c3d087") : secret "plugin-serving-cert" not found Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.742118 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4f1e28f0-85d9-4d51-900a-33ab52c3d087-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.761213 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-459nd\" (UniqueName: \"kubernetes.io/projected/4f1e28f0-85d9-4d51-900a-33ab52c3d087-kube-api-access-459nd\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.842768 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-oauth-serving-cert\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.842867 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75f9s\" (UniqueName: \"kubernetes.io/projected/436dbf02-a82a-4ce6-ab27-a0bc92683d22-kube-api-access-75f9s\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.842908 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-service-ca\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.842929 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-config\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.842992 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-trusted-ca-bundle\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.843024 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-serving-cert\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.843058 4943 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-oauth-config\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.844670 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-service-ca\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.844879 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-oauth-serving-cert\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.844951 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-trusted-ca-bundle\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.845120 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-config\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.847134 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-serving-cert\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.847694 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/436dbf02-a82a-4ce6-ab27-a0bc92683d22-console-oauth-config\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.859974 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75f9s\" (UniqueName: \"kubernetes.io/projected/436dbf02-a82a-4ce6-ab27-a0bc92683d22-kube-api-access-75f9s\") pod \"console-84f9d695fc-h29wl\" (UID: \"436dbf02-a82a-4ce6-ab27-a0bc92683d22\") " pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.861619 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" Nov 29 06:50:35 crc kubenswrapper[4943]: I1129 06:50:35.945041 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.046402 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b458e00f-1442-452e-9588-5c4b822e1bf8-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-7xj8x\" (UID: \"b458e00f-1442-452e-9588-5c4b822e1bf8\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.050894 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b458e00f-1442-452e-9588-5c4b822e1bf8-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-7xj8x\" (UID: \"b458e00f-1442-452e-9588-5c4b822e1bf8\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.051161 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65"] Nov 29 06:50:36 crc kubenswrapper[4943]: W1129 06:50:36.061529 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83d362f9_3c5d_4ee3_98fd_dea1eec92b79.slice/crio-be9e3e78ed7249cc01d76fb8bf769592e0d5620d8c118bc3db76af7c33b859e1 WatchSource:0}: Error finding container be9e3e78ed7249cc01d76fb8bf769592e0d5620d8c118bc3db76af7c33b859e1: Status 404 returned error can't find the container with id be9e3e78ed7249cc01d76fb8bf769592e0d5620d8c118bc3db76af7c33b859e1 Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.141431 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-kf6kc" event={"ID":"32aeaef6-5f98-42e2-97c3-65c6494f256c","Type":"ContainerStarted","Data":"c791f243e4b2f517855840f97eabd0513295325c9ba04b373028da169225a067"} Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.142402 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" event={"ID":"83d362f9-3c5d-4ee3-98fd-dea1eec92b79","Type":"ContainerStarted","Data":"be9e3e78ed7249cc01d76fb8bf769592e0d5620d8c118bc3db76af7c33b859e1"} Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.169237 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-84f9d695fc-h29wl"] Nov 29 06:50:36 crc kubenswrapper[4943]: W1129 06:50:36.170528 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod436dbf02_a82a_4ce6_ab27_a0bc92683d22.slice/crio-638b12c5467073dce8e9753598aa32c5eb14ee69ddb2c93cb39d4d613da7340b WatchSource:0}: Error finding container 638b12c5467073dce8e9753598aa32c5eb14ee69ddb2c93cb39d4d613da7340b: Status 404 returned error can't find the container with id 638b12c5467073dce8e9753598aa32c5eb14ee69ddb2c93cb39d4d613da7340b Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.186274 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.249739 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4f1e28f0-85d9-4d51-900a-33ab52c3d087-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.254257 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4f1e28f0-85d9-4d51-900a-33ab52c3d087-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-fsz5q\" (UID: \"4f1e28f0-85d9-4d51-900a-33ab52c3d087\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.365819 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.383814 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x"] Nov 29 06:50:36 crc kubenswrapper[4943]: W1129 06:50:36.395091 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb458e00f_1442_452e_9588_5c4b822e1bf8.slice/crio-94d31a23f506984a468dc9677b039098d6d4dd9d7b6ffd2bfa0d418b15b501f8 WatchSource:0}: Error finding container 94d31a23f506984a468dc9677b039098d6d4dd9d7b6ffd2bfa0d418b15b501f8: Status 404 returned error can't find the container with id 94d31a23f506984a468dc9677b039098d6d4dd9d7b6ffd2bfa0d418b15b501f8 Nov 29 06:50:36 crc kubenswrapper[4943]: I1129 06:50:36.807221 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q"] Nov 29 06:50:37 crc kubenswrapper[4943]: I1129 06:50:37.150382 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" event={"ID":"4f1e28f0-85d9-4d51-900a-33ab52c3d087","Type":"ContainerStarted","Data":"9507b3bcc4506a4c6f3905e05805b8f694e60fd0d9552bce5e7f8f87873e6a91"} Nov 29 06:50:37 crc kubenswrapper[4943]: I1129 06:50:37.151783 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" event={"ID":"b458e00f-1442-452e-9588-5c4b822e1bf8","Type":"ContainerStarted","Data":"94d31a23f506984a468dc9677b039098d6d4dd9d7b6ffd2bfa0d418b15b501f8"} Nov 29 06:50:37 crc kubenswrapper[4943]: I1129 06:50:37.152875 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84f9d695fc-h29wl" event={"ID":"436dbf02-a82a-4ce6-ab27-a0bc92683d22","Type":"ContainerStarted","Data":"638b12c5467073dce8e9753598aa32c5eb14ee69ddb2c93cb39d4d613da7340b"} Nov 29 06:50:51 crc kubenswrapper[4943]: I1129 06:50:51.234871 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84f9d695fc-h29wl" event={"ID":"436dbf02-a82a-4ce6-ab27-a0bc92683d22","Type":"ContainerStarted","Data":"0564f4b98c3d70035ab2888ef8797430d4a16cecd5c70977139b69e795340a5f"} Nov 29 06:50:52 crc kubenswrapper[4943]: E1129 06:50:52.707439 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context 
canceled" image="registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8045b3d5059cc81bf37964d359055dea9e4915c83f3eec4f800d5ce294c06f97" Nov 29 06:50:52 crc kubenswrapper[4943]: E1129 06:50:52.707934 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8045b3d5059cc81bf37964d359055dea9e4915c83f3eec4f800d5ce294c06f97" Nov 29 06:50:52 crc kubenswrapper[4943]: E1129 06:50:52.708001 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nmstate-handler,Image:registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8045b3d5059cc81bf37964d359055dea9e4915c83f3eec4f800d5ce294c06f97,Command:[manager],Args:[--zap-time-encoding=iso8601],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:WATCH_NAMESPACE,Value:,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:COMPONENT,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.labels['app.kubernetes.io/component'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:PART_OF,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.labels['app.kubernetes.io/part-of'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:VERSION,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.labels['app.kubernetes.io/version'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:MANAGED_BY,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.labels['app.kubernetes.io/managed-by'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:OPERATOR_NAME,Value:nmstate,ValueFrom:nil,},EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:ENABLE_PROFILER,Value:False,ValueFrom:nil,},EnvVar{Name:PROFILER_PORT,Value:6060,ValueFrom:nil,},EnvVar{Name:NMSTATE_INSTANCE_NODE_LOCK_FILE,Value:/var/k8s_nmstate/handler_lock,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{104857600 0} {} 100Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:dbus-socket,ReadOnly:false,MountPath:/run/dbus/system_bus_socket,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:nmstate-lock,ReadOnly:false,MountPath:/var/k8s_nmstate,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovs-socket,ReadOnly:false,MountPath:/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f9f6b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[cat 
/tmp/healthy],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nmstate-handler-kf6kc_openshift-nmstate(32aeaef6-5f98-42e2-97c3-65c6494f256c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 06:50:52 crc kubenswrapper[4943]: E1129 06:50:52.708115 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nmstate-metrics,Image:registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8045b3d5059cc81bf37964d359055dea9e4915c83f3eec4f800d5ce294c06f97,Command:[manager],Args:[--zap-time-encoding=iso8601],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:WATCH_NAMESPACE,Value:,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:RUN_METRICS_MANAGER,Value:,ValueFrom:nil,},EnvVar{Name:OPERATOR_NAME,Value:nmstate,ValueFrom:nil,},EnvVar{Name:ENABLE_PROFILER,Value:False,ValueFrom:nil,},EnvVar{Name:PROFILER_PORT,Value:6060,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{30 -3} {} 30m DecimalSI},memory: {{20971520 0} {} 20Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mcrv8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000690000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nmstate-metrics-7f946cbc9-nmd65_openshift-nmstate(83d362f9-3c5d-4ee3-98fd-dea1eec92b79): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 06:50:52 crc kubenswrapper[4943]: E1129 06:50:52.709209 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-handler\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from 
manifest list: copying config: context canceled\"" pod="openshift-nmstate/nmstate-handler-kf6kc" podUID="32aeaef6-5f98-42e2-97c3-65c6494f256c" Nov 29 06:50:53 crc kubenswrapper[4943]: I1129 06:50:53.269458 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-84f9d695fc-h29wl" podStartSLOduration=18.26943426 podStartE2EDuration="18.26943426s" podCreationTimestamp="2025-11-29 06:50:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:50:53.260474225 +0000 UTC m=+1028.190562978" watchObservedRunningTime="2025-11-29 06:50:53.26943426 +0000 UTC m=+1028.199523003" Nov 29 06:50:55 crc kubenswrapper[4943]: I1129 06:50:55.945591 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:55 crc kubenswrapper[4943]: I1129 06:50:55.946226 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:55 crc kubenswrapper[4943]: I1129 06:50:55.950721 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:56 crc kubenswrapper[4943]: I1129 06:50:56.267672 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-84f9d695fc-h29wl" Nov 29 06:50:56 crc kubenswrapper[4943]: I1129 06:50:56.318133 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-jxf26"] Nov 29 06:51:02 crc kubenswrapper[4943]: I1129 06:51:02.613136 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 06:51:02 crc kubenswrapper[4943]: I1129 06:51:02.613792 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 06:51:21 crc kubenswrapper[4943]: I1129 06:51:21.426281 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-jxf26" podUID="1dec43e3-8363-43ca-a96b-6127086f75db" containerName="console" containerID="cri-o://1557192a6d7fee4b0900b4925e7ff68190f8f79047ae88efec5e0e96b6edd04a" gracePeriod=15 Nov 29 06:51:21 crc kubenswrapper[4943]: E1129 06:51:21.566797 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift4/nmstate-console-plugin-rhel9@sha256:10fe26b1ef17d6fa13d22976b553b935f1cc14e74b8dd14a31306554aff7c513" Nov 29 06:51:21 crc kubenswrapper[4943]: E1129 06:51:21.566967 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:nmstate-console-plugin,Image:registry.redhat.io/openshift4/nmstate-console-plugin-rhel9@sha256:10fe26b1ef17d6fa13d22976b553b935f1cc14e74b8dd14a31306554aff7c513,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:,HostPort:0,ContainerPort:9443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugin-serving-cert,ReadOnly:true,MountPath:/var/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:nginx-conf,ReadOnly:true,MountPath:/etc/nginx/nginx.conf,SubPath:nginx.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-459nd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000690000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nmstate-console-plugin-7fbb5f6569-fsz5q_openshift-nmstate(4f1e28f0-85d9-4d51-900a-33ab52c3d087): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 06:51:21 crc kubenswrapper[4943]: E1129 06:51:21.568165 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-console-plugin\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" podUID="4f1e28f0-85d9-4d51-900a-33ab52c3d087" Nov 29 06:51:22 crc kubenswrapper[4943]: I1129 06:51:22.422022 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jxf26_1dec43e3-8363-43ca-a96b-6127086f75db/console/0.log" Nov 29 06:51:22 crc kubenswrapper[4943]: I1129 06:51:22.422119 4943 generic.go:334] "Generic (PLEG): container finished" podID="1dec43e3-8363-43ca-a96b-6127086f75db" containerID="1557192a6d7fee4b0900b4925e7ff68190f8f79047ae88efec5e0e96b6edd04a" exitCode=2 Nov 29 06:51:22 crc kubenswrapper[4943]: I1129 06:51:22.422228 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jxf26" event={"ID":"1dec43e3-8363-43ca-a96b-6127086f75db","Type":"ContainerDied","Data":"1557192a6d7fee4b0900b4925e7ff68190f8f79047ae88efec5e0e96b6edd04a"} Nov 29 06:51:23 crc kubenswrapper[4943]: E1129 06:51:23.148958 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-console-plugin\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/nmstate-console-plugin-rhel9@sha256:10fe26b1ef17d6fa13d22976b553b935f1cc14e74b8dd14a31306554aff7c513\\\"\"" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" 
podUID="4f1e28f0-85d9-4d51-900a-33ab52c3d087" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.210423 4943 patch_prober.go:28] interesting pod/console-f9d7485db-jxf26 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.210846 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f9d7485db-jxf26" podUID="1dec43e3-8363-43ca-a96b-6127086f75db" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.418584 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jxf26_1dec43e3-8363-43ca-a96b-6127086f75db/console/0.log" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.418690 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.433239 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jxf26_1dec43e3-8363-43ca-a96b-6127086f75db/console/0.log" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.433652 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jxf26" event={"ID":"1dec43e3-8363-43ca-a96b-6127086f75db","Type":"ContainerDied","Data":"ec1bf6a62a079387422d09c78951020accd37aaef8b20d7d45e74cd9e1ce4e82"} Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.433695 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-jxf26" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.433719 4943 scope.go:117] "RemoveContainer" containerID="1557192a6d7fee4b0900b4925e7ff68190f8f79047ae88efec5e0e96b6edd04a" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.618517 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-trusted-ca-bundle\") pod \"1dec43e3-8363-43ca-a96b-6127086f75db\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.618637 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-oauth-config\") pod \"1dec43e3-8363-43ca-a96b-6127086f75db\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.619057 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-oauth-serving-cert\") pod \"1dec43e3-8363-43ca-a96b-6127086f75db\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.619097 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-console-config\") pod \"1dec43e3-8363-43ca-a96b-6127086f75db\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.619118 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6ncq\" (UniqueName: \"kubernetes.io/projected/1dec43e3-8363-43ca-a96b-6127086f75db-kube-api-access-v6ncq\") pod \"1dec43e3-8363-43ca-a96b-6127086f75db\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.619141 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-serving-cert\") pod \"1dec43e3-8363-43ca-a96b-6127086f75db\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.619159 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-service-ca\") pod \"1dec43e3-8363-43ca-a96b-6127086f75db\" (UID: \"1dec43e3-8363-43ca-a96b-6127086f75db\") " Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.619921 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1dec43e3-8363-43ca-a96b-6127086f75db" (UID: "1dec43e3-8363-43ca-a96b-6127086f75db"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.620013 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "1dec43e3-8363-43ca-a96b-6127086f75db" (UID: "1dec43e3-8363-43ca-a96b-6127086f75db"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.620050 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-console-config" (OuterVolumeSpecName: "console-config") pod "1dec43e3-8363-43ca-a96b-6127086f75db" (UID: "1dec43e3-8363-43ca-a96b-6127086f75db"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.620404 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-service-ca" (OuterVolumeSpecName: "service-ca") pod "1dec43e3-8363-43ca-a96b-6127086f75db" (UID: "1dec43e3-8363-43ca-a96b-6127086f75db"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.626014 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dec43e3-8363-43ca-a96b-6127086f75db-kube-api-access-v6ncq" (OuterVolumeSpecName: "kube-api-access-v6ncq") pod "1dec43e3-8363-43ca-a96b-6127086f75db" (UID: "1dec43e3-8363-43ca-a96b-6127086f75db"). InnerVolumeSpecName "kube-api-access-v6ncq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.626321 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "1dec43e3-8363-43ca-a96b-6127086f75db" (UID: "1dec43e3-8363-43ca-a96b-6127086f75db"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.626690 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "1dec43e3-8363-43ca-a96b-6127086f75db" (UID: "1dec43e3-8363-43ca-a96b-6127086f75db"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.720082 4943 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.720522 4943 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.720537 4943 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-console-config\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.720547 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6ncq\" (UniqueName: \"kubernetes.io/projected/1dec43e3-8363-43ca-a96b-6127086f75db-kube-api-access-v6ncq\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.720591 4943 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1dec43e3-8363-43ca-a96b-6127086f75db-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.720604 4943 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.720615 4943 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1dec43e3-8363-43ca-a96b-6127086f75db-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.773735 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-jxf26"] Nov 29 06:51:23 crc kubenswrapper[4943]: I1129 06:51:23.790920 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-jxf26"] Nov 29 06:51:24 crc kubenswrapper[4943]: I1129 06:51:24.443717 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4r826" event={"ID":"dca52725-b0d8-4747-8277-5e4a52465b7e","Type":"ContainerStarted","Data":"1c9915821b74531522fe1bda5d4845d4f509594ffc5c322b889de336043c5fa1"} Nov 29 06:51:24 crc kubenswrapper[4943]: I1129 06:51:24.446443 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" event={"ID":"b458e00f-1442-452e-9588-5c4b822e1bf8","Type":"ContainerStarted","Data":"7cef90dbe0fde89080e03be56b103aed3bf099ff1f3dd5c7c6876294cd8ee32a"} Nov 29 06:51:24 crc kubenswrapper[4943]: I1129 06:51:24.446645 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:51:24 crc kubenswrapper[4943]: I1129 06:51:24.448805 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wfpq8" event={"ID":"72c4f76f-b84f-41f7-b8df-047ba694c94b","Type":"ContainerStarted","Data":"00c7e0c0977e92568bbfc7fc16c452f54ab496d3f7ba6a6c771a313615697942"} Nov 29 06:51:24 crc kubenswrapper[4943]: I1129 06:51:24.480065 4943 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4r826" podStartSLOduration=22.827061989 podStartE2EDuration="1m47.480039432s" podCreationTimestamp="2025-11-29 06:49:37 +0000 UTC" firstStartedPulling="2025-11-29 06:49:56.911233857 +0000 UTC m=+971.841322610" lastFinishedPulling="2025-11-29 06:51:21.5642113 +0000 UTC m=+1056.494300053" observedRunningTime="2025-11-29 06:51:24.470846308 +0000 UTC m=+1059.400935071" watchObservedRunningTime="2025-11-29 06:51:24.480039432 +0000 UTC m=+1059.410128185" Nov 29 06:51:24 crc kubenswrapper[4943]: I1129 06:51:24.524104 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" podStartSLOduration=2.771068112 podStartE2EDuration="49.52408746s" podCreationTimestamp="2025-11-29 06:50:35 +0000 UTC" firstStartedPulling="2025-11-29 06:50:36.397508773 +0000 UTC m=+1011.327597526" lastFinishedPulling="2025-11-29 06:51:23.150528121 +0000 UTC m=+1058.080616874" observedRunningTime="2025-11-29 06:51:24.519885318 +0000 UTC m=+1059.449974081" watchObservedRunningTime="2025-11-29 06:51:24.52408746 +0000 UTC m=+1059.454176213" Nov 29 06:51:25 crc kubenswrapper[4943]: I1129 06:51:25.344405 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1dec43e3-8363-43ca-a96b-6127086f75db" path="/var/lib/kubelet/pods/1dec43e3-8363-43ca-a96b-6127086f75db/volumes" Nov 29 06:51:25 crc kubenswrapper[4943]: I1129 06:51:25.461075 4943 generic.go:334] "Generic (PLEG): container finished" podID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerID="00c7e0c0977e92568bbfc7fc16c452f54ab496d3f7ba6a6c771a313615697942" exitCode=0 Nov 29 06:51:25 crc kubenswrapper[4943]: I1129 06:51:25.461185 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wfpq8" event={"ID":"72c4f76f-b84f-41f7-b8df-047ba694c94b","Type":"ContainerDied","Data":"00c7e0c0977e92568bbfc7fc16c452f54ab496d3f7ba6a6c771a313615697942"} Nov 29 06:51:26 crc kubenswrapper[4943]: I1129 06:51:26.401041 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 06:51:28 crc kubenswrapper[4943]: I1129 06:51:28.296476 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:51:28 crc kubenswrapper[4943]: I1129 06:51:28.296603 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:51:28 crc kubenswrapper[4943]: I1129 06:51:28.333675 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:51:28 crc kubenswrapper[4943]: I1129 06:51:28.515262 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:51:28 crc kubenswrapper[4943]: I1129 06:51:28.562595 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4r826"] Nov 29 06:51:29 crc kubenswrapper[4943]: I1129 06:51:29.487655 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-kf6kc" event={"ID":"32aeaef6-5f98-42e2-97c3-65c6494f256c","Type":"ContainerStarted","Data":"95c0fa60805f1e0e65f00d751226d0fb2fbadd6c4e1ec3fc507ae63ef89af49f"} Nov 29 06:51:29 crc kubenswrapper[4943]: I1129 06:51:29.487931 4943 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:51:29 crc kubenswrapper[4943]: I1129 06:51:29.506007 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-kf6kc" podStartSLOduration=1.6093483979999998 podStartE2EDuration="54.505988353s" podCreationTimestamp="2025-11-29 06:50:35 +0000 UTC" firstStartedPulling="2025-11-29 06:50:35.677103439 +0000 UTC m=+1010.607192192" lastFinishedPulling="2025-11-29 06:51:28.573743394 +0000 UTC m=+1063.503832147" observedRunningTime="2025-11-29 06:51:29.502440688 +0000 UTC m=+1064.432529441" watchObservedRunningTime="2025-11-29 06:51:29.505988353 +0000 UTC m=+1064.436077106" Nov 29 06:51:30 crc kubenswrapper[4943]: I1129 06:51:30.492823 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4r826" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerName="registry-server" containerID="cri-o://1c9915821b74531522fe1bda5d4845d4f509594ffc5c322b889de336043c5fa1" gracePeriod=2 Nov 29 06:51:30 crc kubenswrapper[4943]: I1129 06:51:30.973650 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ktcb9"] Nov 29 06:51:30 crc kubenswrapper[4943]: E1129 06:51:30.973869 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dec43e3-8363-43ca-a96b-6127086f75db" containerName="console" Nov 29 06:51:30 crc kubenswrapper[4943]: I1129 06:51:30.973880 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dec43e3-8363-43ca-a96b-6127086f75db" containerName="console" Nov 29 06:51:30 crc kubenswrapper[4943]: I1129 06:51:30.973978 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dec43e3-8363-43ca-a96b-6127086f75db" containerName="console" Nov 29 06:51:30 crc kubenswrapper[4943]: I1129 06:51:30.974783 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:30 crc kubenswrapper[4943]: I1129 06:51:30.989349 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ktcb9"] Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.161712 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knz57\" (UniqueName: \"kubernetes.io/projected/893757d6-44a8-43dd-99c0-0e4d9be158a9-kube-api-access-knz57\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.161790 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-utilities\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.161824 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-catalog-content\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.263888 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knz57\" (UniqueName: \"kubernetes.io/projected/893757d6-44a8-43dd-99c0-0e4d9be158a9-kube-api-access-knz57\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.263961 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-utilities\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.263993 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-catalog-content\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.264449 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-utilities\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.264523 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-catalog-content\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.295244 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-knz57\" (UniqueName: \"kubernetes.io/projected/893757d6-44a8-43dd-99c0-0e4d9be158a9-kube-api-access-knz57\") pod \"community-operators-ktcb9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.500041 4943 generic.go:334] "Generic (PLEG): container finished" podID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerID="1c9915821b74531522fe1bda5d4845d4f509594ffc5c322b889de336043c5fa1" exitCode=0 Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.500094 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4r826" event={"ID":"dca52725-b0d8-4747-8277-5e4a52465b7e","Type":"ContainerDied","Data":"1c9915821b74531522fe1bda5d4845d4f509594ffc5c322b889de336043c5fa1"} Nov 29 06:51:31 crc kubenswrapper[4943]: I1129 06:51:31.595894 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:51:32 crc kubenswrapper[4943]: I1129 06:51:32.613402 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 06:51:32 crc kubenswrapper[4943]: I1129 06:51:32.613554 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 06:51:32 crc kubenswrapper[4943]: I1129 06:51:32.613642 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 06:51:32 crc kubenswrapper[4943]: I1129 06:51:32.614474 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b2b3367625bf6bfa5e7fcdc538fe79e620be10e5e50d271ba5c28897d9e4459e"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 06:51:32 crc kubenswrapper[4943]: I1129 06:51:32.614545 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://b2b3367625bf6bfa5e7fcdc538fe79e620be10e5e50d271ba5c28897d9e4459e" gracePeriod=600 Nov 29 06:51:33 crc kubenswrapper[4943]: I1129 06:51:33.514044 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="b2b3367625bf6bfa5e7fcdc538fe79e620be10e5e50d271ba5c28897d9e4459e" exitCode=0 Nov 29 06:51:33 crc kubenswrapper[4943]: I1129 06:51:33.514088 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"b2b3367625bf6bfa5e7fcdc538fe79e620be10e5e50d271ba5c28897d9e4459e"} Nov 29 06:51:33 crc kubenswrapper[4943]: I1129 06:51:33.514588 4943 scope.go:117] "RemoveContainer" 
containerID="fa03227f4d437f0a53532154ae212e7074c3ffe3db244477095def4be235847f" Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.524161 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4r826" event={"ID":"dca52725-b0d8-4747-8277-5e4a52465b7e","Type":"ContainerDied","Data":"f16a48389751bd73a3b686ae2aaf7172559121e98335eba7e322838d38309f1a"} Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.524206 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f16a48389751bd73a3b686ae2aaf7172559121e98335eba7e322838d38309f1a" Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.525311 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.627082 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-utilities\") pod \"dca52725-b0d8-4747-8277-5e4a52465b7e\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.627176 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9b42\" (UniqueName: \"kubernetes.io/projected/dca52725-b0d8-4747-8277-5e4a52465b7e-kube-api-access-x9b42\") pod \"dca52725-b0d8-4747-8277-5e4a52465b7e\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.627429 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-catalog-content\") pod \"dca52725-b0d8-4747-8277-5e4a52465b7e\" (UID: \"dca52725-b0d8-4747-8277-5e4a52465b7e\") " Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.628307 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-utilities" (OuterVolumeSpecName: "utilities") pod "dca52725-b0d8-4747-8277-5e4a52465b7e" (UID: "dca52725-b0d8-4747-8277-5e4a52465b7e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.631969 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dca52725-b0d8-4747-8277-5e4a52465b7e-kube-api-access-x9b42" (OuterVolumeSpecName: "kube-api-access-x9b42") pod "dca52725-b0d8-4747-8277-5e4a52465b7e" (UID: "dca52725-b0d8-4747-8277-5e4a52465b7e"). InnerVolumeSpecName "kube-api-access-x9b42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.646309 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dca52725-b0d8-4747-8277-5e4a52465b7e" (UID: "dca52725-b0d8-4747-8277-5e4a52465b7e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.728990 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.729027 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dca52725-b0d8-4747-8277-5e4a52465b7e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:34 crc kubenswrapper[4943]: I1129 06:51:34.729039 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9b42\" (UniqueName: \"kubernetes.io/projected/dca52725-b0d8-4747-8277-5e4a52465b7e-kube-api-access-x9b42\") on node \"crc\" DevicePath \"\"" Nov 29 06:51:35 crc kubenswrapper[4943]: I1129 06:51:35.385068 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ktcb9"] Nov 29 06:51:35 crc kubenswrapper[4943]: W1129 06:51:35.388746 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod893757d6_44a8_43dd_99c0_0e4d9be158a9.slice/crio-7cbe1bc2ac288cfd61e330a2649bef4ae4427ca3d08d5e6de1b2c16e344f0364 WatchSource:0}: Error finding container 7cbe1bc2ac288cfd61e330a2649bef4ae4427ca3d08d5e6de1b2c16e344f0364: Status 404 returned error can't find the container with id 7cbe1bc2ac288cfd61e330a2649bef4ae4427ca3d08d5e6de1b2c16e344f0364 Nov 29 06:51:35 crc kubenswrapper[4943]: I1129 06:51:35.532523 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktcb9" event={"ID":"893757d6-44a8-43dd-99c0-0e4d9be158a9","Type":"ContainerStarted","Data":"7cbe1bc2ac288cfd61e330a2649bef4ae4427ca3d08d5e6de1b2c16e344f0364"} Nov 29 06:51:35 crc kubenswrapper[4943]: I1129 06:51:35.532584 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4r826" Nov 29 06:51:35 crc kubenswrapper[4943]: I1129 06:51:35.551492 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4r826"] Nov 29 06:51:35 crc kubenswrapper[4943]: I1129 06:51:35.554798 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4r826"] Nov 29 06:51:35 crc kubenswrapper[4943]: I1129 06:51:35.674610 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-kf6kc" Nov 29 06:51:36 crc kubenswrapper[4943]: I1129 06:51:36.192533 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-7xj8x" Nov 29 06:51:37 crc kubenswrapper[4943]: I1129 06:51:37.336287 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" path="/var/lib/kubelet/pods/dca52725-b0d8-4747-8277-5e4a52465b7e/volumes" Nov 29 06:51:47 crc kubenswrapper[4943]: I1129 06:51:47.598821 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" event={"ID":"83d362f9-3c5d-4ee3-98fd-dea1eec92b79","Type":"ContainerStarted","Data":"5f4d358f4d752a168cc4af5a7f4734e97936c5a13e2cfa32f0f8532f2449bc1c"} Nov 29 06:51:48 crc kubenswrapper[4943]: I1129 06:51:48.606854 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wfpq8" event={"ID":"72c4f76f-b84f-41f7-b8df-047ba694c94b","Type":"ContainerStarted","Data":"24da48d6bbfed124eb4102143d75658bb038a5d2b717b2edde7ef29b3282dbe2"} Nov 29 06:51:48 crc kubenswrapper[4943]: I1129 06:51:48.608689 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktcb9" event={"ID":"893757d6-44a8-43dd-99c0-0e4d9be158a9","Type":"ContainerStarted","Data":"ad0b52b6428f816baa169a26b170539b818a3475fff4bd49e1e4b673bd2ce72e"} Nov 29 06:51:52 crc kubenswrapper[4943]: I1129 06:51:52.629791 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"feaa2a1b84b1a23a15dcc59525a52a97d91a9f2f8df5c4a63426effa07b9b188"} Nov 29 06:51:56 crc kubenswrapper[4943]: E1129 06:51:56.073417 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" podUID="83d362f9-3c5d-4ee3-98fd-dea1eec92b79" Nov 29 06:51:56 crc kubenswrapper[4943]: I1129 06:51:56.653473 4943 generic.go:334] "Generic (PLEG): container finished" podID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerID="ad0b52b6428f816baa169a26b170539b818a3475fff4bd49e1e4b673bd2ce72e" exitCode=0 Nov 29 06:51:56 crc kubenswrapper[4943]: I1129 06:51:56.653599 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktcb9" event={"ID":"893757d6-44a8-43dd-99c0-0e4d9be158a9","Type":"ContainerDied","Data":"ad0b52b6428f816baa169a26b170539b818a3475fff4bd49e1e4b673bd2ce72e"} Nov 29 06:51:56 crc kubenswrapper[4943]: I1129 06:51:56.709449 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wfpq8" 
podStartSLOduration=62.039891817 podStartE2EDuration="2m15.709428024s" podCreationTimestamp="2025-11-29 06:49:41 +0000 UTC" firstStartedPulling="2025-11-29 06:50:21.495348467 +0000 UTC m=+996.425437230" lastFinishedPulling="2025-11-29 06:51:35.164884684 +0000 UTC m=+1070.094973437" observedRunningTime="2025-11-29 06:51:56.702937036 +0000 UTC m=+1091.633025799" watchObservedRunningTime="2025-11-29 06:51:56.709428024 +0000 UTC m=+1091.639516777" Nov 29 06:51:59 crc kubenswrapper[4943]: I1129 06:51:59.674189 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" event={"ID":"4f1e28f0-85d9-4d51-900a-33ab52c3d087","Type":"ContainerStarted","Data":"36339495d0ce16482cfcb96937ba8ddaaf4b98035eb3de3e0f3ff48226457117"} Nov 29 06:51:59 crc kubenswrapper[4943]: I1129 06:51:59.696124 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-fsz5q" podStartSLOduration=2.756490243 podStartE2EDuration="1m24.696103224s" podCreationTimestamp="2025-11-29 06:50:35 +0000 UTC" firstStartedPulling="2025-11-29 06:50:36.816997867 +0000 UTC m=+1011.747086620" lastFinishedPulling="2025-11-29 06:51:58.756610848 +0000 UTC m=+1093.686699601" observedRunningTime="2025-11-29 06:51:59.687803432 +0000 UTC m=+1094.617892185" watchObservedRunningTime="2025-11-29 06:51:59.696103224 +0000 UTC m=+1094.626191977" Nov 29 06:52:01 crc kubenswrapper[4943]: I1129 06:52:01.679941 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:52:01 crc kubenswrapper[4943]: I1129 06:52:01.680649 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:52:01 crc kubenswrapper[4943]: I1129 06:52:01.722391 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:52:02 crc kubenswrapper[4943]: I1129 06:52:02.727132 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:52:02 crc kubenswrapper[4943]: I1129 06:52:02.764117 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wfpq8"] Nov 29 06:52:04 crc kubenswrapper[4943]: I1129 06:52:04.703893 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wfpq8" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerName="registry-server" containerID="cri-o://24da48d6bbfed124eb4102143d75658bb038a5d2b717b2edde7ef29b3282dbe2" gracePeriod=2 Nov 29 06:52:05 crc kubenswrapper[4943]: I1129 06:52:05.721623 4943 generic.go:334] "Generic (PLEG): container finished" podID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerID="f5cfff280d22488fc7b84ad0f178e98ca554648ed1aba6de6ef002cede323013" exitCode=0 Nov 29 06:52:05 crc kubenswrapper[4943]: I1129 06:52:05.721708 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktcb9" event={"ID":"893757d6-44a8-43dd-99c0-0e4d9be158a9","Type":"ContainerDied","Data":"f5cfff280d22488fc7b84ad0f178e98ca554648ed1aba6de6ef002cede323013"} Nov 29 06:52:05 crc kubenswrapper[4943]: I1129 06:52:05.724514 4943 generic.go:334] "Generic (PLEG): container finished" podID="72c4f76f-b84f-41f7-b8df-047ba694c94b" 
containerID="24da48d6bbfed124eb4102143d75658bb038a5d2b717b2edde7ef29b3282dbe2" exitCode=0 Nov 29 06:52:05 crc kubenswrapper[4943]: I1129 06:52:05.724554 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wfpq8" event={"ID":"72c4f76f-b84f-41f7-b8df-047ba694c94b","Type":"ContainerDied","Data":"24da48d6bbfed124eb4102143d75658bb038a5d2b717b2edde7ef29b3282dbe2"} Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.630459 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.659153 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkbft\" (UniqueName: \"kubernetes.io/projected/72c4f76f-b84f-41f7-b8df-047ba694c94b-kube-api-access-xkbft\") pod \"72c4f76f-b84f-41f7-b8df-047ba694c94b\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.659277 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-utilities\") pod \"72c4f76f-b84f-41f7-b8df-047ba694c94b\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.659304 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-catalog-content\") pod \"72c4f76f-b84f-41f7-b8df-047ba694c94b\" (UID: \"72c4f76f-b84f-41f7-b8df-047ba694c94b\") " Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.663357 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-utilities" (OuterVolumeSpecName: "utilities") pod "72c4f76f-b84f-41f7-b8df-047ba694c94b" (UID: "72c4f76f-b84f-41f7-b8df-047ba694c94b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.693398 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72c4f76f-b84f-41f7-b8df-047ba694c94b-kube-api-access-xkbft" (OuterVolumeSpecName: "kube-api-access-xkbft") pod "72c4f76f-b84f-41f7-b8df-047ba694c94b" (UID: "72c4f76f-b84f-41f7-b8df-047ba694c94b"). InnerVolumeSpecName "kube-api-access-xkbft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.720201 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72c4f76f-b84f-41f7-b8df-047ba694c94b" (UID: "72c4f76f-b84f-41f7-b8df-047ba694c94b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.735694 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" event={"ID":"83d362f9-3c5d-4ee3-98fd-dea1eec92b79","Type":"ContainerStarted","Data":"5ca44a5cdf36f361d58691a2b06cde24e98934ccaeadf6668e2d8961aefbf90a"} Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.738882 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wfpq8" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.739963 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wfpq8" event={"ID":"72c4f76f-b84f-41f7-b8df-047ba694c94b","Type":"ContainerDied","Data":"9bb894e03507c4121c074f6c1d0e72055bfbc34c3bdc9aeb51327763e07ec8b8"} Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.740055 4943 scope.go:117] "RemoveContainer" containerID="24da48d6bbfed124eb4102143d75658bb038a5d2b717b2edde7ef29b3282dbe2" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.764451 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkbft\" (UniqueName: \"kubernetes.io/projected/72c4f76f-b84f-41f7-b8df-047ba694c94b-kube-api-access-xkbft\") on node \"crc\" DevicePath \"\"" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.764499 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.764513 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72c4f76f-b84f-41f7-b8df-047ba694c94b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.773440 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wfpq8"] Nov 29 06:52:06 crc kubenswrapper[4943]: I1129 06:52:06.776375 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wfpq8"] Nov 29 06:52:07 crc kubenswrapper[4943]: I1129 06:52:07.159331 4943 scope.go:117] "RemoveContainer" containerID="00c7e0c0977e92568bbfc7fc16c452f54ab496d3f7ba6a6c771a313615697942" Nov 29 06:52:07 crc kubenswrapper[4943]: I1129 06:52:07.335274 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" path="/var/lib/kubelet/pods/72c4f76f-b84f-41f7-b8df-047ba694c94b/volumes" Nov 29 06:52:07 crc kubenswrapper[4943]: I1129 06:52:07.420852 4943 scope.go:117] "RemoveContainer" containerID="efaad2aa3c194cd81c62c9606bbfb60bdc7e2e95afa291816fd1497d53ad75dd" Nov 29 06:52:07 crc kubenswrapper[4943]: I1129 06:52:07.763386 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-nmd65" podStartSLOduration=2.433683869 podStartE2EDuration="1m32.763364492s" podCreationTimestamp="2025-11-29 06:50:35 +0000 UTC" firstStartedPulling="2025-11-29 06:50:36.064203159 +0000 UTC m=+1010.994291912" lastFinishedPulling="2025-11-29 06:52:06.393883782 +0000 UTC m=+1101.323972535" observedRunningTime="2025-11-29 06:52:07.761866066 +0000 UTC m=+1102.691954829" watchObservedRunningTime="2025-11-29 06:52:07.763364492 +0000 UTC m=+1102.693453245" Nov 29 06:52:08 crc kubenswrapper[4943]: I1129 06:52:08.755604 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktcb9" event={"ID":"893757d6-44a8-43dd-99c0-0e4d9be158a9","Type":"ContainerStarted","Data":"30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa"} Nov 29 06:52:08 crc kubenswrapper[4943]: I1129 06:52:08.776759 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ktcb9" podStartSLOduration=28.263740897 
podStartE2EDuration="38.776737621s" podCreationTimestamp="2025-11-29 06:51:30 +0000 UTC" firstStartedPulling="2025-11-29 06:51:57.553641227 +0000 UTC m=+1092.483729970" lastFinishedPulling="2025-11-29 06:52:08.066637941 +0000 UTC m=+1102.996726694" observedRunningTime="2025-11-29 06:52:08.772609411 +0000 UTC m=+1103.702698164" watchObservedRunningTime="2025-11-29 06:52:08.776737621 +0000 UTC m=+1103.706826374" Nov 29 06:52:11 crc kubenswrapper[4943]: I1129 06:52:11.596519 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:52:11 crc kubenswrapper[4943]: I1129 06:52:11.596888 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:52:11 crc kubenswrapper[4943]: I1129 06:52:11.637042 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227179 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm"] Nov 29 06:52:18 crc kubenswrapper[4943]: E1129 06:52:18.227710 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerName="extract-utilities" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227726 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerName="extract-utilities" Nov 29 06:52:18 crc kubenswrapper[4943]: E1129 06:52:18.227744 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerName="registry-server" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227751 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerName="registry-server" Nov 29 06:52:18 crc kubenswrapper[4943]: E1129 06:52:18.227760 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerName="extract-content" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227766 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerName="extract-content" Nov 29 06:52:18 crc kubenswrapper[4943]: E1129 06:52:18.227775 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerName="extract-content" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227780 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerName="extract-content" Nov 29 06:52:18 crc kubenswrapper[4943]: E1129 06:52:18.227789 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerName="extract-utilities" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227795 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerName="extract-utilities" Nov 29 06:52:18 crc kubenswrapper[4943]: E1129 06:52:18.227805 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerName="registry-server" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227810 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" 
containerName="registry-server" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227926 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="dca52725-b0d8-4747-8277-5e4a52465b7e" containerName="registry-server" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.227943 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="72c4f76f-b84f-41f7-b8df-047ba694c94b" containerName="registry-server" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.228708 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.232814 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.238452 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm"] Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.326518 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.326590 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.326648 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk984\" (UniqueName: \"kubernetes.io/projected/f4141f7e-d480-47fb-9471-727df764b4ec-kube-api-access-dk984\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.427389 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.427712 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.427785 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-dk984\" (UniqueName: \"kubernetes.io/projected/f4141f7e-d480-47fb-9471-727df764b4ec-kube-api-access-dk984\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.427914 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.428148 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.448408 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk984\" (UniqueName: \"kubernetes.io/projected/f4141f7e-d480-47fb-9471-727df764b4ec-kube-api-access-dk984\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.545739 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.755310 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm"] Nov 29 06:52:18 crc kubenswrapper[4943]: W1129 06:52:18.759537 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4141f7e_d480_47fb_9471_727df764b4ec.slice/crio-36cf173958f94f28da8b2334cea4561294041b37b9c5dc1a79f6bd8bf302da7d WatchSource:0}: Error finding container 36cf173958f94f28da8b2334cea4561294041b37b9c5dc1a79f6bd8bf302da7d: Status 404 returned error can't find the container with id 36cf173958f94f28da8b2334cea4561294041b37b9c5dc1a79f6bd8bf302da7d Nov 29 06:52:18 crc kubenswrapper[4943]: I1129 06:52:18.826469 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" event={"ID":"f4141f7e-d480-47fb-9471-727df764b4ec","Type":"ContainerStarted","Data":"36cf173958f94f28da8b2334cea4561294041b37b9c5dc1a79f6bd8bf302da7d"} Nov 29 06:52:21 crc kubenswrapper[4943]: I1129 06:52:21.664376 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:52:23 crc kubenswrapper[4943]: I1129 06:52:23.926983 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ktcb9"] Nov 29 06:52:23 crc kubenswrapper[4943]: I1129 06:52:23.927503 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ktcb9" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="registry-server" containerID="cri-o://30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa" gracePeriod=2 Nov 29 06:52:31 crc kubenswrapper[4943]: E1129 06:52:31.597040 4943 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa is running failed: container process not found" containerID="30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 06:52:31 crc kubenswrapper[4943]: E1129 06:52:31.599469 4943 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa is running failed: container process not found" containerID="30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 06:52:31 crc kubenswrapper[4943]: E1129 06:52:31.599986 4943 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa is running failed: container process not found" containerID="30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 06:52:31 crc kubenswrapper[4943]: E1129 06:52:31.600048 4943 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-ktcb9" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="registry-server" Nov 29 06:52:37 crc kubenswrapper[4943]: I1129 06:52:37.808730 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ktcb9_893757d6-44a8-43dd-99c0-0e4d9be158a9/registry-server/0.log" Nov 29 06:52:37 crc kubenswrapper[4943]: I1129 06:52:37.810181 4943 generic.go:334] "Generic (PLEG): container finished" podID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerID="30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa" exitCode=-1 Nov 29 06:52:37 crc kubenswrapper[4943]: I1129 06:52:37.810234 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktcb9" event={"ID":"893757d6-44a8-43dd-99c0-0e4d9be158a9","Type":"ContainerDied","Data":"30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa"} Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.288285 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.427794 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-catalog-content\") pod \"893757d6-44a8-43dd-99c0-0e4d9be158a9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.427880 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-utilities\") pod \"893757d6-44a8-43dd-99c0-0e4d9be158a9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.428003 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knz57\" (UniqueName: \"kubernetes.io/projected/893757d6-44a8-43dd-99c0-0e4d9be158a9-kube-api-access-knz57\") pod \"893757d6-44a8-43dd-99c0-0e4d9be158a9\" (UID: \"893757d6-44a8-43dd-99c0-0e4d9be158a9\") " Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.428889 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-utilities" (OuterVolumeSpecName: "utilities") pod "893757d6-44a8-43dd-99c0-0e4d9be158a9" (UID: "893757d6-44a8-43dd-99c0-0e4d9be158a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.433687 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/893757d6-44a8-43dd-99c0-0e4d9be158a9-kube-api-access-knz57" (OuterVolumeSpecName: "kube-api-access-knz57") pod "893757d6-44a8-43dd-99c0-0e4d9be158a9" (UID: "893757d6-44a8-43dd-99c0-0e4d9be158a9"). InnerVolumeSpecName "kube-api-access-knz57". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.489770 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "893757d6-44a8-43dd-99c0-0e4d9be158a9" (UID: "893757d6-44a8-43dd-99c0-0e4d9be158a9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.530097 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knz57\" (UniqueName: \"kubernetes.io/projected/893757d6-44a8-43dd-99c0-0e4d9be158a9-kube-api-access-knz57\") on node \"crc\" DevicePath \"\"" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.530146 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.530161 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/893757d6-44a8-43dd-99c0-0e4d9be158a9-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.822543 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ktcb9" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.822526 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktcb9" event={"ID":"893757d6-44a8-43dd-99c0-0e4d9be158a9","Type":"ContainerDied","Data":"7cbe1bc2ac288cfd61e330a2649bef4ae4427ca3d08d5e6de1b2c16e344f0364"} Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.822803 4943 scope.go:117] "RemoveContainer" containerID="30c9e6c3b1093a0e6375cdcc4b6fb3079ee071306aa97a2939fb743b2c3936aa" Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.823906 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" event={"ID":"f4141f7e-d480-47fb-9471-727df764b4ec","Type":"ContainerStarted","Data":"205b024122f8c84816069985c7ee094ee97b6ffed50b44fdd2cd655b92bbddd8"} Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.868590 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ktcb9"] Nov 29 06:52:38 crc kubenswrapper[4943]: I1129 06:52:38.874590 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ktcb9"] Nov 29 06:52:39 crc kubenswrapper[4943]: I1129 06:52:39.339860 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" path="/var/lib/kubelet/pods/893757d6-44a8-43dd-99c0-0e4d9be158a9/volumes" Nov 29 06:52:39 crc kubenswrapper[4943]: I1129 06:52:39.833334 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4141f7e-d480-47fb-9471-727df764b4ec" containerID="205b024122f8c84816069985c7ee094ee97b6ffed50b44fdd2cd655b92bbddd8" exitCode=0 Nov 29 06:52:39 crc kubenswrapper[4943]: I1129 06:52:39.833458 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" 
event={"ID":"f4141f7e-d480-47fb-9471-727df764b4ec","Type":"ContainerDied","Data":"205b024122f8c84816069985c7ee094ee97b6ffed50b44fdd2cd655b92bbddd8"} Nov 29 06:52:50 crc kubenswrapper[4943]: I1129 06:52:50.473221 4943 scope.go:117] "RemoveContainer" containerID="f5cfff280d22488fc7b84ad0f178e98ca554648ed1aba6de6ef002cede323013" Nov 29 06:52:50 crc kubenswrapper[4943]: I1129 06:52:50.496463 4943 scope.go:117] "RemoveContainer" containerID="ad0b52b6428f816baa169a26b170539b818a3475fff4bd49e1e4b673bd2ce72e" Nov 29 06:52:59 crc kubenswrapper[4943]: I1129 06:52:59.968744 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4141f7e-d480-47fb-9471-727df764b4ec" containerID="1d9d6ccccc715f0cce55ad5fbf5edf5b2a3b6a2e5d59f905963292fbdb1a2c2e" exitCode=0 Nov 29 06:52:59 crc kubenswrapper[4943]: I1129 06:52:59.968895 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" event={"ID":"f4141f7e-d480-47fb-9471-727df764b4ec","Type":"ContainerDied","Data":"1d9d6ccccc715f0cce55ad5fbf5edf5b2a3b6a2e5d59f905963292fbdb1a2c2e"} Nov 29 06:53:00 crc kubenswrapper[4943]: I1129 06:53:00.980274 4943 generic.go:334] "Generic (PLEG): container finished" podID="f4141f7e-d480-47fb-9471-727df764b4ec" containerID="b3f0050dd85f2d57b5ef7f6c7228fc9cd36849b81712705f39f9d2a5d3d27026" exitCode=0 Nov 29 06:53:00 crc kubenswrapper[4943]: I1129 06:53:00.980387 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" event={"ID":"f4141f7e-d480-47fb-9471-727df764b4ec","Type":"ContainerDied","Data":"b3f0050dd85f2d57b5ef7f6c7228fc9cd36849b81712705f39f9d2a5d3d27026"} Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.207173 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.336203 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-util\") pod \"f4141f7e-d480-47fb-9471-727df764b4ec\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.336304 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dk984\" (UniqueName: \"kubernetes.io/projected/f4141f7e-d480-47fb-9471-727df764b4ec-kube-api-access-dk984\") pod \"f4141f7e-d480-47fb-9471-727df764b4ec\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.336350 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-bundle\") pod \"f4141f7e-d480-47fb-9471-727df764b4ec\" (UID: \"f4141f7e-d480-47fb-9471-727df764b4ec\") " Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.337600 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-bundle" (OuterVolumeSpecName: "bundle") pod "f4141f7e-d480-47fb-9471-727df764b4ec" (UID: "f4141f7e-d480-47fb-9471-727df764b4ec"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.341991 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4141f7e-d480-47fb-9471-727df764b4ec-kube-api-access-dk984" (OuterVolumeSpecName: "kube-api-access-dk984") pod "f4141f7e-d480-47fb-9471-727df764b4ec" (UID: "f4141f7e-d480-47fb-9471-727df764b4ec"). InnerVolumeSpecName "kube-api-access-dk984". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.350514 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-util" (OuterVolumeSpecName: "util") pod "f4141f7e-d480-47fb-9471-727df764b4ec" (UID: "f4141f7e-d480-47fb-9471-727df764b4ec"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.438080 4943 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-util\") on node \"crc\" DevicePath \"\"" Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.438427 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dk984\" (UniqueName: \"kubernetes.io/projected/f4141f7e-d480-47fb-9471-727df764b4ec-kube-api-access-dk984\") on node \"crc\" DevicePath \"\"" Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.438505 4943 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f4141f7e-d480-47fb-9471-727df764b4ec-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.994027 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" event={"ID":"f4141f7e-d480-47fb-9471-727df764b4ec","Type":"ContainerDied","Data":"36cf173958f94f28da8b2334cea4561294041b37b9c5dc1a79f6bd8bf302da7d"} Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.994071 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36cf173958f94f28da8b2334cea4561294041b37b9c5dc1a79f6bd8bf302da7d" Nov 29 06:53:02 crc kubenswrapper[4943]: I1129 06:53:02.994091 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.197920 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq"] Nov 29 06:53:17 crc kubenswrapper[4943]: E1129 06:53:17.198947 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4141f7e-d480-47fb-9471-727df764b4ec" containerName="pull" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.198963 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4141f7e-d480-47fb-9471-727df764b4ec" containerName="pull" Nov 29 06:53:17 crc kubenswrapper[4943]: E1129 06:53:17.198981 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="extract-content" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.198990 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="extract-content" Nov 29 06:53:17 crc kubenswrapper[4943]: E1129 06:53:17.198998 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4141f7e-d480-47fb-9471-727df764b4ec" containerName="util" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.199006 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4141f7e-d480-47fb-9471-727df764b4ec" containerName="util" Nov 29 06:53:17 crc kubenswrapper[4943]: E1129 06:53:17.199016 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="extract-utilities" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.199025 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="extract-utilities" Nov 29 06:53:17 crc kubenswrapper[4943]: E1129 06:53:17.199035 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="registry-server" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.199045 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="registry-server" Nov 29 06:53:17 crc kubenswrapper[4943]: E1129 06:53:17.199059 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4141f7e-d480-47fb-9471-727df764b4ec" containerName="extract" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.199067 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4141f7e-d480-47fb-9471-727df764b4ec" containerName="extract" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.199197 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="893757d6-44a8-43dd-99c0-0e4d9be158a9" containerName="registry-server" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.199212 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4141f7e-d480-47fb-9471-727df764b4ec" containerName="extract" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.199700 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.202197 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.202604 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-965jj" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.202749 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.202932 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.208786 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.219739 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq"] Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.337957 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-webhook-cert\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.338054 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-apiservice-cert\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.338102 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drmvb\" (UniqueName: \"kubernetes.io/projected/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-kube-api-access-drmvb\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.439899 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-webhook-cert\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.440067 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-apiservice-cert\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.440193 4943 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drmvb\" (UniqueName: \"kubernetes.io/projected/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-kube-api-access-drmvb\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.447356 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-webhook-cert\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.463716 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-apiservice-cert\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.470385 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq"] Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.474050 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.478999 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.479125 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-jkf9w" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.479272 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.479747 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drmvb\" (UniqueName: \"kubernetes.io/projected/07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433-kube-api-access-drmvb\") pod \"metallb-operator-controller-manager-55bbfd58d6-j4fhq\" (UID: \"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433\") " pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.488530 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq"] Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.525144 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.544358 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wxg5\" (UniqueName: \"kubernetes.io/projected/408be66a-ef5a-472c-aa9f-31e20666f3ee-kube-api-access-2wxg5\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.544757 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/408be66a-ef5a-472c-aa9f-31e20666f3ee-webhook-cert\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.544895 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/408be66a-ef5a-472c-aa9f-31e20666f3ee-apiservice-cert\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.645912 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/408be66a-ef5a-472c-aa9f-31e20666f3ee-webhook-cert\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.646304 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/408be66a-ef5a-472c-aa9f-31e20666f3ee-apiservice-cert\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.646363 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wxg5\" (UniqueName: \"kubernetes.io/projected/408be66a-ef5a-472c-aa9f-31e20666f3ee-kube-api-access-2wxg5\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.650256 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/408be66a-ef5a-472c-aa9f-31e20666f3ee-webhook-cert\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.651192 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/408be66a-ef5a-472c-aa9f-31e20666f3ee-apiservice-cert\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " 
pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.678386 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wxg5\" (UniqueName: \"kubernetes.io/projected/408be66a-ef5a-472c-aa9f-31e20666f3ee-kube-api-access-2wxg5\") pod \"metallb-operator-webhook-server-7c4cd9bdff-jvhwq\" (UID: \"408be66a-ef5a-472c-aa9f-31e20666f3ee\") " pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:17 crc kubenswrapper[4943]: I1129 06:53:17.832868 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:18 crc kubenswrapper[4943]: I1129 06:53:18.557440 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq"] Nov 29 06:53:18 crc kubenswrapper[4943]: I1129 06:53:18.706236 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq"] Nov 29 06:53:18 crc kubenswrapper[4943]: W1129 06:53:18.723610 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod408be66a_ef5a_472c_aa9f_31e20666f3ee.slice/crio-6858cde9878a87328b9196d85e5e6233f0dd2402dc8cea19d791c9b623342b69 WatchSource:0}: Error finding container 6858cde9878a87328b9196d85e5e6233f0dd2402dc8cea19d791c9b623342b69: Status 404 returned error can't find the container with id 6858cde9878a87328b9196d85e5e6233f0dd2402dc8cea19d791c9b623342b69 Nov 29 06:53:19 crc kubenswrapper[4943]: I1129 06:53:19.120190 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" event={"ID":"408be66a-ef5a-472c-aa9f-31e20666f3ee","Type":"ContainerStarted","Data":"6858cde9878a87328b9196d85e5e6233f0dd2402dc8cea19d791c9b623342b69"} Nov 29 06:53:19 crc kubenswrapper[4943]: I1129 06:53:19.121982 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" event={"ID":"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433","Type":"ContainerStarted","Data":"d3b793734d48cb525fa1beb0978948714f27ad0c23133fdf9038712ad319c981"} Nov 29 06:53:47 crc kubenswrapper[4943]: I1129 06:53:47.341339 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" event={"ID":"408be66a-ef5a-472c-aa9f-31e20666f3ee","Type":"ContainerStarted","Data":"82bddfb71e69c5f7acaea9f9de99793effdbb36751e066210eecad5d7eacb093"} Nov 29 06:53:47 crc kubenswrapper[4943]: I1129 06:53:47.341950 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:47 crc kubenswrapper[4943]: I1129 06:53:47.342821 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" event={"ID":"07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433","Type":"ContainerStarted","Data":"d4526a9721f13cc568bfdbb45ece05d3b94bfc99d96aca761585cdc9a67ed039"} Nov 29 06:53:47 crc kubenswrapper[4943]: I1129 06:53:47.343051 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:53:47 crc kubenswrapper[4943]: I1129 06:53:47.362998 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" podStartSLOduration=3.333801505 podStartE2EDuration="30.362981007s" podCreationTimestamp="2025-11-29 06:53:17 +0000 UTC" firstStartedPulling="2025-11-29 06:53:18.725418777 +0000 UTC m=+1173.655507540" lastFinishedPulling="2025-11-29 06:53:45.754598289 +0000 UTC m=+1200.684687042" observedRunningTime="2025-11-29 06:53:47.362044404 +0000 UTC m=+1202.292133167" watchObservedRunningTime="2025-11-29 06:53:47.362981007 +0000 UTC m=+1202.293069760" Nov 29 06:53:57 crc kubenswrapper[4943]: I1129 06:53:57.837890 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7c4cd9bdff-jvhwq" Nov 29 06:53:57 crc kubenswrapper[4943]: I1129 06:53:57.863539 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" podStartSLOduration=13.713386339 podStartE2EDuration="40.863520853s" podCreationTimestamp="2025-11-29 06:53:17 +0000 UTC" firstStartedPulling="2025-11-29 06:53:18.586740885 +0000 UTC m=+1173.516829638" lastFinishedPulling="2025-11-29 06:53:45.736875399 +0000 UTC m=+1200.666964152" observedRunningTime="2025-11-29 06:53:47.38663298 +0000 UTC m=+1202.316721743" watchObservedRunningTime="2025-11-29 06:53:57.863520853 +0000 UTC m=+1212.793609606" Nov 29 06:54:02 crc kubenswrapper[4943]: I1129 06:54:02.613402 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 06:54:02 crc kubenswrapper[4943]: I1129 06:54:02.613795 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 06:54:17 crc kubenswrapper[4943]: I1129 06:54:17.529073 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-55bbfd58d6-j4fhq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.471666 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz"] Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.472910 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.476402 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.476722 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-gvtql" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.480482 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-dt7vh"] Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.483341 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.486096 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.489697 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.495489 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz"] Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.582993 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-7q88r"] Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.585373 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.588011 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.593040 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-qhsdq"] Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.594056 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.594991 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-2g787" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.595288 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.595509 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.598235 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.606077 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-qhsdq"] Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.669655 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-sockets\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.669715 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ad8fae5-ebf3-406c-b971-f15b1978e82c-metrics-certs\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.669753 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-conf\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.670074 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-metrics\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.670233 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-startup\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.670329 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-reloader\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.670373 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf4kx\" (UniqueName: \"kubernetes.io/projected/65c5f0ea-c60a-4abd-b490-8bccde64fbc2-kube-api-access-kf4kx\") pod \"frr-k8s-webhook-server-7fcb986d4-mt5gz\" (UID: \"65c5f0ea-c60a-4abd-b490-8bccde64fbc2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.670409 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pw9g\" (UniqueName: \"kubernetes.io/projected/4ad8fae5-ebf3-406c-b971-f15b1978e82c-kube-api-access-9pw9g\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.670484 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65c5f0ea-c60a-4abd-b490-8bccde64fbc2-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-mt5gz\" (UID: \"65c5f0ea-c60a-4abd-b490-8bccde64fbc2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771450 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ad8fae5-ebf3-406c-b971-f15b1978e82c-metrics-certs\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771515 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmtmn\" (UniqueName: \"kubernetes.io/projected/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-kube-api-access-vmtmn\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771549 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-conf\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771578 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: 
\"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-metrics\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771597 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771621 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-startup\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771638 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-cert\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771658 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwclw\" (UniqueName: \"kubernetes.io/projected/c2c18b1d-2ac5-4321-a137-250557efd955-kube-api-access-gwclw\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771676 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-reloader\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.771700 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf4kx\" (UniqueName: \"kubernetes.io/projected/65c5f0ea-c60a-4abd-b490-8bccde64fbc2-kube-api-access-kf4kx\") pod \"frr-k8s-webhook-server-7fcb986d4-mt5gz\" (UID: \"65c5f0ea-c60a-4abd-b490-8bccde64fbc2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772045 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pw9g\" (UniqueName: \"kubernetes.io/projected/4ad8fae5-ebf3-406c-b971-f15b1978e82c-kube-api-access-9pw9g\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772067 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-metrics-certs\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772087 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: 
\"kubernetes.io/configmap/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metallb-excludel2\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772111 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65c5f0ea-c60a-4abd-b490-8bccde64fbc2-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-mt5gz\" (UID: \"65c5f0ea-c60a-4abd-b490-8bccde64fbc2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772134 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-sockets\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772158 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metrics-certs\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772157 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-conf\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772311 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-reloader\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772632 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-sockets\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.772916 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4ad8fae5-ebf3-406c-b971-f15b1978e82c-metrics\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.773082 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4ad8fae5-ebf3-406c-b971-f15b1978e82c-frr-startup\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.777691 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ad8fae5-ebf3-406c-b971-f15b1978e82c-metrics-certs\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.777819 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" 
(UniqueName: \"kubernetes.io/secret/65c5f0ea-c60a-4abd-b490-8bccde64fbc2-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-mt5gz\" (UID: \"65c5f0ea-c60a-4abd-b490-8bccde64fbc2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.789486 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf4kx\" (UniqueName: \"kubernetes.io/projected/65c5f0ea-c60a-4abd-b490-8bccde64fbc2-kube-api-access-kf4kx\") pod \"frr-k8s-webhook-server-7fcb986d4-mt5gz\" (UID: \"65c5f0ea-c60a-4abd-b490-8bccde64fbc2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.794367 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pw9g\" (UniqueName: \"kubernetes.io/projected/4ad8fae5-ebf3-406c-b971-f15b1978e82c-kube-api-access-9pw9g\") pod \"frr-k8s-dt7vh\" (UID: \"4ad8fae5-ebf3-406c-b971-f15b1978e82c\") " pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.799631 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.811288 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dt7vh" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.873196 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-cert\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.873243 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwclw\" (UniqueName: \"kubernetes.io/projected/c2c18b1d-2ac5-4321-a137-250557efd955-kube-api-access-gwclw\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.873267 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-metrics-certs\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.873286 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metallb-excludel2\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.873324 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metrics-certs\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.873347 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmtmn\" (UniqueName: 
\"kubernetes.io/projected/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-kube-api-access-vmtmn\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.873381 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: E1129 06:54:18.873497 4943 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 29 06:54:18 crc kubenswrapper[4943]: E1129 06:54:18.873547 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist podName:f60f2450-9ce3-4b8c-b268-6a0aab1cb075 nodeName:}" failed. No retries permitted until 2025-11-29 06:54:19.373530731 +0000 UTC m=+1234.303619484 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist") pod "speaker-7q88r" (UID: "f60f2450-9ce3-4b8c-b268-6a0aab1cb075") : secret "metallb-memberlist" not found Nov 29 06:54:18 crc kubenswrapper[4943]: E1129 06:54:18.873722 4943 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 29 06:54:18 crc kubenswrapper[4943]: E1129 06:54:18.873744 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-metrics-certs podName:c2c18b1d-2ac5-4321-a137-250557efd955 nodeName:}" failed. No retries permitted until 2025-11-29 06:54:19.373737626 +0000 UTC m=+1234.303826379 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-metrics-certs") pod "controller-f8648f98b-qhsdq" (UID: "c2c18b1d-2ac5-4321-a137-250557efd955") : secret "controller-certs-secret" not found Nov 29 06:54:18 crc kubenswrapper[4943]: E1129 06:54:18.873997 4943 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 29 06:54:18 crc kubenswrapper[4943]: E1129 06:54:18.874080 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metrics-certs podName:f60f2450-9ce3-4b8c-b268-6a0aab1cb075 nodeName:}" failed. No retries permitted until 2025-11-29 06:54:19.374058313 +0000 UTC m=+1234.304147146 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metrics-certs") pod "speaker-7q88r" (UID: "f60f2450-9ce3-4b8c-b268-6a0aab1cb075") : secret "speaker-certs-secret" not found Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.874509 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metallb-excludel2\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.876953 4943 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.887628 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-cert\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.892852 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmtmn\" (UniqueName: \"kubernetes.io/projected/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-kube-api-access-vmtmn\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:18 crc kubenswrapper[4943]: I1129 06:54:18.893304 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwclw\" (UniqueName: \"kubernetes.io/projected/c2c18b1d-2ac5-4321-a137-250557efd955-kube-api-access-gwclw\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.230501 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz"] Nov 29 06:54:19 crc kubenswrapper[4943]: W1129 06:54:19.242969 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65c5f0ea_c60a_4abd_b490_8bccde64fbc2.slice/crio-47be19078772e51dd5c4aed69061150d476431fd78a5342010eccde857293ddd WatchSource:0}: Error finding container 47be19078772e51dd5c4aed69061150d476431fd78a5342010eccde857293ddd: Status 404 returned error can't find the container with id 47be19078772e51dd5c4aed69061150d476431fd78a5342010eccde857293ddd Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.381992 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.382061 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-metrics-certs\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.382108 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metrics-certs\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:19 crc kubenswrapper[4943]: E1129 06:54:19.382156 4943 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 29 06:54:19 crc kubenswrapper[4943]: E1129 06:54:19.382263 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist podName:f60f2450-9ce3-4b8c-b268-6a0aab1cb075 nodeName:}" failed. No retries permitted until 2025-11-29 06:54:20.382230035 +0000 UTC m=+1235.312318788 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist") pod "speaker-7q88r" (UID: "f60f2450-9ce3-4b8c-b268-6a0aab1cb075") : secret "metallb-memberlist" not found Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.387097 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c2c18b1d-2ac5-4321-a137-250557efd955-metrics-certs\") pod \"controller-f8648f98b-qhsdq\" (UID: \"c2c18b1d-2ac5-4321-a137-250557efd955\") " pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.387285 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-metrics-certs\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.523164 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.549428 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" event={"ID":"65c5f0ea-c60a-4abd-b490-8bccde64fbc2","Type":"ContainerStarted","Data":"47be19078772e51dd5c4aed69061150d476431fd78a5342010eccde857293ddd"} Nov 29 06:54:19 crc kubenswrapper[4943]: I1129 06:54:19.771688 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-qhsdq"] Nov 29 06:54:19 crc kubenswrapper[4943]: W1129 06:54:19.775423 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2c18b1d_2ac5_4321_a137_250557efd955.slice/crio-3188b7a30c28206a0c782af84f86e40ac3f73bc0412e398c5c1afb6730c0a510 WatchSource:0}: Error finding container 3188b7a30c28206a0c782af84f86e40ac3f73bc0412e398c5c1afb6730c0a510: Status 404 returned error can't find the container with id 3188b7a30c28206a0c782af84f86e40ac3f73bc0412e398c5c1afb6730c0a510 Nov 29 06:54:20 crc kubenswrapper[4943]: I1129 06:54:20.397957 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:20 crc kubenswrapper[4943]: E1129 06:54:20.398119 4943 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 29 06:54:20 crc kubenswrapper[4943]: E1129 06:54:20.398470 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist podName:f60f2450-9ce3-4b8c-b268-6a0aab1cb075 nodeName:}" failed. No retries permitted until 2025-11-29 06:54:22.398452352 +0000 UTC m=+1237.328541115 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist") pod "speaker-7q88r" (UID: "f60f2450-9ce3-4b8c-b268-6a0aab1cb075") : secret "metallb-memberlist" not found Nov 29 06:54:20 crc kubenswrapper[4943]: I1129 06:54:20.558956 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-qhsdq" event={"ID":"c2c18b1d-2ac5-4321-a137-250557efd955","Type":"ContainerStarted","Data":"3188b7a30c28206a0c782af84f86e40ac3f73bc0412e398c5c1afb6730c0a510"} Nov 29 06:54:21 crc kubenswrapper[4943]: I1129 06:54:21.565135 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerStarted","Data":"d119f66ce7704f8199b424bfc1012842365166a3787c39c33025b53450f9ba33"} Nov 29 06:54:22 crc kubenswrapper[4943]: I1129 06:54:22.427244 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:22 crc kubenswrapper[4943]: I1129 06:54:22.435054 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f60f2450-9ce3-4b8c-b268-6a0aab1cb075-memberlist\") pod \"speaker-7q88r\" (UID: \"f60f2450-9ce3-4b8c-b268-6a0aab1cb075\") " pod="metallb-system/speaker-7q88r" Nov 29 06:54:22 crc kubenswrapper[4943]: I1129 06:54:22.507789 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-7q88r" Nov 29 06:54:22 crc kubenswrapper[4943]: W1129 06:54:22.529382 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60f2450_9ce3_4b8c_b268_6a0aab1cb075.slice/crio-7470b49bf5d6210139e6c75f342d6a89ff040e96ba320caace8c5ecc628c37cd WatchSource:0}: Error finding container 7470b49bf5d6210139e6c75f342d6a89ff040e96ba320caace8c5ecc628c37cd: Status 404 returned error can't find the container with id 7470b49bf5d6210139e6c75f342d6a89ff040e96ba320caace8c5ecc628c37cd Nov 29 06:54:22 crc kubenswrapper[4943]: I1129 06:54:22.571901 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7q88r" event={"ID":"f60f2450-9ce3-4b8c-b268-6a0aab1cb075","Type":"ContainerStarted","Data":"7470b49bf5d6210139e6c75f342d6a89ff040e96ba320caace8c5ecc628c37cd"} Nov 29 06:54:22 crc kubenswrapper[4943]: I1129 06:54:22.574833 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-qhsdq" event={"ID":"c2c18b1d-2ac5-4321-a137-250557efd955","Type":"ContainerStarted","Data":"f870cea4a26b5ed590dd0021314adbc023ccdec6ec3174d62502209f0369cb93"} Nov 29 06:54:23 crc kubenswrapper[4943]: I1129 06:54:23.591538 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-qhsdq" event={"ID":"c2c18b1d-2ac5-4321-a137-250557efd955","Type":"ContainerStarted","Data":"d011e00c59f8d0fdfcea566564b057d578ab39825ec081755ecb53691b579576"} Nov 29 06:54:24 crc kubenswrapper[4943]: I1129 06:54:24.603751 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7q88r" event={"ID":"f60f2450-9ce3-4b8c-b268-6a0aab1cb075","Type":"ContainerStarted","Data":"70d523e1fe2b2bbbfa62aea84bc3d266d89558337abd20b4cdaec4a8d163d499"} Nov 29 06:54:24 crc 
kubenswrapper[4943]: I1129 06:54:24.604063 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:24 crc kubenswrapper[4943]: I1129 06:54:24.628416 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-qhsdq" podStartSLOduration=6.628398393 podStartE2EDuration="6.628398393s" podCreationTimestamp="2025-11-29 06:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:54:24.62135511 +0000 UTC m=+1239.551443883" watchObservedRunningTime="2025-11-29 06:54:24.628398393 +0000 UTC m=+1239.558487146" Nov 29 06:54:26 crc kubenswrapper[4943]: I1129 06:54:26.628005 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7q88r" event={"ID":"f60f2450-9ce3-4b8c-b268-6a0aab1cb075","Type":"ContainerStarted","Data":"95a2e53a197eb07d3998df9a889d82199b0bc8ceb1978af933dfbf0b99085a84"} Nov 29 06:54:26 crc kubenswrapper[4943]: I1129 06:54:26.628383 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-7q88r" Nov 29 06:54:26 crc kubenswrapper[4943]: I1129 06:54:26.645521 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-7q88r" podStartSLOduration=8.645498433 podStartE2EDuration="8.645498433s" podCreationTimestamp="2025-11-29 06:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:54:26.643702789 +0000 UTC m=+1241.573791572" watchObservedRunningTime="2025-11-29 06:54:26.645498433 +0000 UTC m=+1241.575587186" Nov 29 06:54:32 crc kubenswrapper[4943]: I1129 06:54:32.613046 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 06:54:32 crc kubenswrapper[4943]: I1129 06:54:32.613649 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 06:54:37 crc kubenswrapper[4943]: E1129 06:54:37.286489 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift4/frr-rhel9@sha256:e5c5e7ca4ed54c9edba5dfa1d504bbe58016c2abdc872ebb8b26a628958e5a2a" Nov 29 06:54:37 crc kubenswrapper[4943]: E1129 06:54:37.287201 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:frr-k8s-webhook-server,Image:registry.redhat.io/openshift4/frr-rhel9@sha256:e5c5e7ca4ed54c9edba5dfa1d504bbe58016c2abdc872ebb8b26a628958e5a2a,Command:[/frr-k8s],Args:[--log-level=debug --webhook-mode=onlywebhook --disable-cert-rotation=true --namespace=$(NAMESPACE) 
--metrics-bind-address=:7572],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:monitoring,HostPort:0,ContainerPort:7572,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kf4kx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/metrics,Port:{1 0 monitoring},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/metrics,Port:{1 0 monitoring},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000700000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod frr-k8s-webhook-server-7fcb986d4-mt5gz_metallb-system(65c5f0ea-c60a-4abd-b490-8bccde64fbc2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 06:54:37 crc kubenswrapper[4943]: E1129 06:54:37.290031 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"frr-k8s-webhook-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" podUID="65c5f0ea-c60a-4abd-b490-8bccde64fbc2" Nov 29 06:54:37 crc kubenswrapper[4943]: E1129 06:54:37.825990 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"frr-k8s-webhook-server\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/frr-rhel9@sha256:e5c5e7ca4ed54c9edba5dfa1d504bbe58016c2abdc872ebb8b26a628958e5a2a\\\"\"" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" podUID="65c5f0ea-c60a-4abd-b490-8bccde64fbc2" Nov 29 06:54:39 crc kubenswrapper[4943]: I1129 06:54:39.527264 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-qhsdq" Nov 29 06:54:42 crc kubenswrapper[4943]: I1129 06:54:42.514806 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-7q88r" Nov 29 
06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.538749 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-pkxbj"]
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.539866 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pkxbj"
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.541546 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.542475 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.551354 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pkxbj"]
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.553089 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-gz8hm"
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.571634 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ds76\" (UniqueName: \"kubernetes.io/projected/0c523633-faf7-451d-bc45-b2e00fe05d85-kube-api-access-9ds76\") pod \"openstack-operator-index-pkxbj\" (UID: \"0c523633-faf7-451d-bc45-b2e00fe05d85\") " pod="openstack-operators/openstack-operator-index-pkxbj"
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.673495 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ds76\" (UniqueName: \"kubernetes.io/projected/0c523633-faf7-451d-bc45-b2e00fe05d85-kube-api-access-9ds76\") pod \"openstack-operator-index-pkxbj\" (UID: \"0c523633-faf7-451d-bc45-b2e00fe05d85\") " pod="openstack-operators/openstack-operator-index-pkxbj"
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.693525 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ds76\" (UniqueName: \"kubernetes.io/projected/0c523633-faf7-451d-bc45-b2e00fe05d85-kube-api-access-9ds76\") pod \"openstack-operator-index-pkxbj\" (UID: \"0c523633-faf7-451d-bc45-b2e00fe05d85\") " pod="openstack-operators/openstack-operator-index-pkxbj"
Nov 29 06:54:45 crc kubenswrapper[4943]: I1129 06:54:45.858017 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pkxbj"
Nov 29 06:54:48 crc kubenswrapper[4943]: I1129 06:54:48.652941 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pkxbj"]
Nov 29 06:54:48 crc kubenswrapper[4943]: W1129 06:54:48.658817 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c523633_faf7_451d_bc45_b2e00fe05d85.slice/crio-d4d7d415e2106a444c4865714fdcc627e2a558f0c6779d9a4c5e87e55b4274c0 WatchSource:0}: Error finding container d4d7d415e2106a444c4865714fdcc627e2a558f0c6779d9a4c5e87e55b4274c0: Status 404 returned error can't find the container with id d4d7d415e2106a444c4865714fdcc627e2a558f0c6779d9a4c5e87e55b4274c0
Nov 29 06:54:49 crc kubenswrapper[4943]: I1129 06:54:49.068774 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" event={"ID":"65c5f0ea-c60a-4abd-b490-8bccde64fbc2","Type":"ContainerStarted","Data":"ebf0b3689971840ba7bc86b38bde2c921ddfdcabed127e017f625e404d26e482"}
Nov 29 06:54:49 crc kubenswrapper[4943]: I1129 06:54:49.071474 4943 generic.go:334] "Generic (PLEG): container finished" podID="4ad8fae5-ebf3-406c-b971-f15b1978e82c" containerID="3cbcc6a8cd5dc5ab274d3d6f6ca9781075243fa8278248ce922d513e595f9636" exitCode=0
Nov 29 06:54:49 crc kubenswrapper[4943]: I1129 06:54:49.071667 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerDied","Data":"3cbcc6a8cd5dc5ab274d3d6f6ca9781075243fa8278248ce922d513e595f9636"}
Nov 29 06:54:49 crc kubenswrapper[4943]: I1129 06:54:49.078296 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pkxbj" event={"ID":"0c523633-faf7-451d-bc45-b2e00fe05d85","Type":"ContainerStarted","Data":"d4d7d415e2106a444c4865714fdcc627e2a558f0c6779d9a4c5e87e55b4274c0"}
Nov 29 06:54:49 crc kubenswrapper[4943]: I1129 06:54:49.918204 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-pkxbj"]
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.085277 4943 generic.go:334] "Generic (PLEG): container finished" podID="4ad8fae5-ebf3-406c-b971-f15b1978e82c" containerID="878838654dda27ec22426515dd23e8c31e03ea05d8e92b1fa82a0173c2af2c49" exitCode=0
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.085344 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerDied","Data":"878838654dda27ec22426515dd23e8c31e03ea05d8e92b1fa82a0173c2af2c49"}
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.086768 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz"
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.129419 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz" podStartSLOduration=-9223372004.725382 podStartE2EDuration="32.129393587s" podCreationTimestamp="2025-11-29 06:54:18 +0000 UTC" firstStartedPulling="2025-11-29 06:54:19.246260098 +0000 UTC m=+1234.176348851" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 06:54:50.12337015 +0000 UTC m=+1265.053458903" watchObservedRunningTime="2025-11-29 06:54:50.129393587 +0000 UTC m=+1265.059482340"
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.530147 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-dqtsc"]
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.534446 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.541687 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-dqtsc"]
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.603968 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmjln\" (UniqueName: \"kubernetes.io/projected/c683af0c-166d-4bac-9c73-5d9d13d32f81-kube-api-access-hmjln\") pod \"openstack-operator-index-dqtsc\" (UID: \"c683af0c-166d-4bac-9c73-5d9d13d32f81\") " pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.705511 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmjln\" (UniqueName: \"kubernetes.io/projected/c683af0c-166d-4bac-9c73-5d9d13d32f81-kube-api-access-hmjln\") pod \"openstack-operator-index-dqtsc\" (UID: \"c683af0c-166d-4bac-9c73-5d9d13d32f81\") " pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:54:50 crc kubenswrapper[4943]: I1129 06:54:50.910487 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmjln\" (UniqueName: \"kubernetes.io/projected/c683af0c-166d-4bac-9c73-5d9d13d32f81-kube-api-access-hmjln\") pod \"openstack-operator-index-dqtsc\" (UID: \"c683af0c-166d-4bac-9c73-5d9d13d32f81\") " pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:54:51 crc kubenswrapper[4943]: I1129 06:54:51.093372 4943 generic.go:334] "Generic (PLEG): container finished" podID="4ad8fae5-ebf3-406c-b971-f15b1978e82c" containerID="1d89d6d7d814e2f1776718aacaa3e253b77ff9d6b26bee0cb6e58bbb4849c3f6" exitCode=0
Nov 29 06:54:51 crc kubenswrapper[4943]: I1129 06:54:51.093439 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerDied","Data":"1d89d6d7d814e2f1776718aacaa3e253b77ff9d6b26bee0cb6e58bbb4849c3f6"}
Nov 29 06:54:51 crc kubenswrapper[4943]: I1129 06:54:51.154650 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:54:51 crc kubenswrapper[4943]: I1129 06:54:51.689807 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-dqtsc"]
Nov 29 06:54:51 crc kubenswrapper[4943]: W1129 06:54:51.695319 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc683af0c_166d_4bac_9c73_5d9d13d32f81.slice/crio-b6a8611008deb128ed855ac32d970a4795a281ca8161a67986549a4f27467af4 WatchSource:0}: Error finding container b6a8611008deb128ed855ac32d970a4795a281ca8161a67986549a4f27467af4: Status 404 returned error can't find the container with id b6a8611008deb128ed855ac32d970a4795a281ca8161a67986549a4f27467af4
Nov 29 06:54:52 crc kubenswrapper[4943]: I1129 06:54:52.101165 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerStarted","Data":"1fefa8900bff3a778b890e0e3477264eaae83d631144b9b904f5742a0868ab46"}
Nov 29 06:54:52 crc kubenswrapper[4943]: I1129 06:54:52.102069 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-dqtsc" event={"ID":"c683af0c-166d-4bac-9c73-5d9d13d32f81","Type":"ContainerStarted","Data":"b6a8611008deb128ed855ac32d970a4795a281ca8161a67986549a4f27467af4"}
Nov 29 06:54:53 crc kubenswrapper[4943]: I1129 06:54:53.113070 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerStarted","Data":"4027f1e3fd00b7835c616d85982f11b92c4a2a8e25d9874bc728dfac3571e260"}
Nov 29 06:54:55 crc kubenswrapper[4943]: I1129 06:54:55.131122 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerStarted","Data":"971316302ee3cab159d654236891470a5e39c9efdc14eb2dc300c814bfb0563e"}
Nov 29 06:55:02 crc kubenswrapper[4943]: I1129 06:55:02.613264 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:55:02 crc kubenswrapper[4943]: I1129 06:55:02.613736 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:55:02 crc kubenswrapper[4943]: I1129 06:55:02.613779 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7"
Nov 29 06:55:02 crc kubenswrapper[4943]: I1129 06:55:02.614367 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"feaa2a1b84b1a23a15dcc59525a52a97d91a9f2f8df5c4a63426effa07b9b188"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 06:55:02 crc kubenswrapper[4943]: I1129 06:55:02.614475 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://feaa2a1b84b1a23a15dcc59525a52a97d91a9f2f8df5c4a63426effa07b9b188" gracePeriod=600
Nov 29 06:55:05 crc kubenswrapper[4943]: I1129 06:55:05.192590 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerStarted","Data":"804408fe53733da4698463cc55e99c24c9bf7a0ee94ec8c619190e3dc933a8e6"}
Nov 29 06:55:08 crc kubenswrapper[4943]: I1129 06:55:08.811154 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mt5gz"
Nov 29 06:55:12 crc kubenswrapper[4943]: I1129 06:55:12.243342 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="feaa2a1b84b1a23a15dcc59525a52a97d91a9f2f8df5c4a63426effa07b9b188" exitCode=0
Nov 29 06:55:12 crc kubenswrapper[4943]: I1129 06:55:12.243403 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"feaa2a1b84b1a23a15dcc59525a52a97d91a9f2f8df5c4a63426effa07b9b188"}
Nov 29 06:55:12 crc kubenswrapper[4943]: I1129 06:55:12.243781 4943 scope.go:117] "RemoveContainer" containerID="b2b3367625bf6bfa5e7fcdc538fe79e620be10e5e50d271ba5c28897d9e4459e"
Nov 29 06:55:23 crc kubenswrapper[4943]: E1129 06:55:23.020765 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:55:23 crc kubenswrapper[4943]: E1129 06:55:23.021114 4943 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying layer: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:55:23 crc kubenswrapper[4943]: E1129 06:55:23.021249 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hmjln,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-operator-index-dqtsc_openstack-operators(c683af0c-166d-4bac-9c73-5d9d13d32f81): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 06:55:23 crc kubenswrapper[4943]: E1129 06:55:23.022437 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/openstack-operator-index-dqtsc" podUID="c683af0c-166d-4bac-9c73-5d9d13d32f81"
Nov 29 06:55:23 crc kubenswrapper[4943]: E1129 06:55:23.316436 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1\\\"\"" pod="openstack-operators/openstack-operator-index-dqtsc" podUID="c683af0c-166d-4bac-9c73-5d9d13d32f81"
Nov 29 06:55:24 crc kubenswrapper[4943]: I1129 06:55:24.324889 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerStarted","Data":"1df78daebd9c2da82ec651dbd849e45ab87d539b2ed4722755edd98aca28cafb"}
Nov 29 06:55:24 crc kubenswrapper[4943]: I1129 06:55:24.327011 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"6bdc0d48c9b1a39aaaa8872104ad9af5eda5e1f91200620e07605efda9f99245"}
Nov 29 06:55:28 crc kubenswrapper[4943]: I1129 06:55:28.353316 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dt7vh" event={"ID":"4ad8fae5-ebf3-406c-b971-f15b1978e82c","Type":"ContainerStarted","Data":"6c5973a9d453022d8624f4736ae6920827f38f10ecc406e12b74a3bc560822fa"}
Nov 29 06:55:29 crc kubenswrapper[4943]: I1129 06:55:29.359964 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-dt7vh"
Nov 29 06:55:29 crc kubenswrapper[4943]: I1129 06:55:29.361936 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-dt7vh"
Nov 29 06:55:29 crc kubenswrapper[4943]: I1129 06:55:29.381284 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-dt7vh" podStartSLOduration=44.22577257 podStartE2EDuration="1m11.381260056s" podCreationTimestamp="2025-11-29 06:54:18 +0000 UTC" firstStartedPulling="2025-11-29 06:54:21.048333944 +0000 UTC m=+1235.978422697" lastFinishedPulling="2025-11-29 06:54:48.20382142 +0000 UTC m=+1263.133910183" observedRunningTime="2025-11-29 06:55:29.379505152 +0000 UTC m=+1304.309593925" watchObservedRunningTime="2025-11-29 06:55:29.381260056 +0000 UTC m=+1304.311348809"
Nov 29 06:55:33 crc kubenswrapper[4943]: I1129 06:55:33.811977 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-dt7vh"
Nov 29 06:55:33 crc kubenswrapper[4943]: I1129 06:55:33.849157 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-dt7vh"
Nov 29 06:56:04 crc kubenswrapper[4943]: E1129 06:56:04.973594 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:56:04 crc kubenswrapper[4943]: E1129 06:56:04.974113 4943 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:56:04 crc kubenswrapper[4943]: E1129 06:56:04.974266 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9ds76,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-operator-index-pkxbj_openstack-operators(0c523633-faf7-451d-bc45-b2e00fe05d85): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 06:56:04 crc kubenswrapper[4943]: E1129 06:56:04.975479 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-operator-index-pkxbj" podUID="0c523633-faf7-451d-bc45-b2e00fe05d85"
Nov 29 06:56:05 crc kubenswrapper[4943]: E1129 06:56:05.640052 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:56:05 crc kubenswrapper[4943]: E1129 06:56:05.640104 4943 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:56:05 crc kubenswrapper[4943]: E1129 06:56:05.640229 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hmjln,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-operator-index-dqtsc_openstack-operators(c683af0c-166d-4bac-9c73-5d9d13d32f81): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 06:56:05 crc kubenswrapper[4943]: E1129 06:56:05.641389 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-operator-index-dqtsc" podUID="c683af0c-166d-4bac-9c73-5d9d13d32f81"
Nov 29 06:56:05 crc kubenswrapper[4943]: I1129 06:56:05.807634 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pkxbj"
Nov 29 06:56:05 crc kubenswrapper[4943]: I1129 06:56:05.846630 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ds76\" (UniqueName: \"kubernetes.io/projected/0c523633-faf7-451d-bc45-b2e00fe05d85-kube-api-access-9ds76\") pod \"0c523633-faf7-451d-bc45-b2e00fe05d85\" (UID: \"0c523633-faf7-451d-bc45-b2e00fe05d85\") "
Nov 29 06:56:05 crc kubenswrapper[4943]: I1129 06:56:05.851858 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c523633-faf7-451d-bc45-b2e00fe05d85-kube-api-access-9ds76" (OuterVolumeSpecName: "kube-api-access-9ds76") pod "0c523633-faf7-451d-bc45-b2e00fe05d85" (UID: "0c523633-faf7-451d-bc45-b2e00fe05d85"). InnerVolumeSpecName "kube-api-access-9ds76". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:56:05 crc kubenswrapper[4943]: I1129 06:56:05.947638 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ds76\" (UniqueName: \"kubernetes.io/projected/0c523633-faf7-451d-bc45-b2e00fe05d85-kube-api-access-9ds76\") on node \"crc\" DevicePath \"\""
Nov 29 06:56:06 crc kubenswrapper[4943]: I1129 06:56:06.582497 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pkxbj" event={"ID":"0c523633-faf7-451d-bc45-b2e00fe05d85","Type":"ContainerDied","Data":"d4d7d415e2106a444c4865714fdcc627e2a558f0c6779d9a4c5e87e55b4274c0"}
Nov 29 06:56:06 crc kubenswrapper[4943]: I1129 06:56:06.582535 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pkxbj"
Nov 29 06:56:06 crc kubenswrapper[4943]: I1129 06:56:06.634720 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-pkxbj"]
Nov 29 06:56:06 crc kubenswrapper[4943]: I1129 06:56:06.644994 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-pkxbj"]
Nov 29 06:56:07 crc kubenswrapper[4943]: I1129 06:56:07.335006 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c523633-faf7-451d-bc45-b2e00fe05d85" path="/var/lib/kubelet/pods/0c523633-faf7-451d-bc45-b2e00fe05d85/volumes"
Nov 29 06:56:17 crc kubenswrapper[4943]: E1129 06:56:17.331217 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.113:5001/openstack-k8s-operators/openstack-operator-index:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1\\\"\"" pod="openstack-operators/openstack-operator-index-dqtsc" podUID="c683af0c-166d-4bac-9c73-5d9d13d32f81"
Nov 29 06:56:21 crc kubenswrapper[4943]: I1129 06:56:21.601220 4943 scope.go:117] "RemoveContainer" containerID="464f8e622bc989c4f6bff31b21b19d1fd275d6faaf681d03d6077c54c941ee77"
Nov 29 06:56:31 crc kubenswrapper[4943]: I1129 06:56:31.331158 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 29 06:56:35 crc kubenswrapper[4943]: I1129 06:56:35.780458 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-dqtsc" event={"ID":"c683af0c-166d-4bac-9c73-5d9d13d32f81","Type":"ContainerStarted","Data":"35fcc2d2100c34ed6bc63e40954cd52980adae541a08a6cc301c396c425f4d67"}
Nov 29 06:56:35 crc kubenswrapper[4943]: I1129 06:56:35.797126 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-dqtsc" podStartSLOduration=2.101518851 podStartE2EDuration="1m45.797072736s" podCreationTimestamp="2025-11-29 06:54:50 +0000 UTC" firstStartedPulling="2025-11-29 06:54:51.6974542 +0000 UTC m=+1266.627542953" lastFinishedPulling="2025-11-29 06:56:35.393008085 +0000 UTC m=+1370.323096838" observedRunningTime="2025-11-29 06:56:35.793844836 +0000 UTC m=+1370.723933589" watchObservedRunningTime="2025-11-29 06:56:35.797072736 +0000 UTC m=+1370.727161509"
Nov 29 06:56:41 crc kubenswrapper[4943]: I1129 06:56:41.155053 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:56:41 crc kubenswrapper[4943]: I1129 06:56:41.155669 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:56:41 crc kubenswrapper[4943]: I1129 06:56:41.183818 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:56:41 crc kubenswrapper[4943]: I1129 06:56:41.845873 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-dqtsc"
Nov 29 06:56:58 crc kubenswrapper[4943]: E1129 06:56:58.417885 4943 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.091s"
Nov 29 06:57:21 crc kubenswrapper[4943]: I1129 06:57:21.640093 4943 scope.go:117] "RemoveContainer" containerID="d1fa560f5c6695efa79c2e9275e0df90ce767af1afeabff037fac5b319b1638d"
Nov 29 06:57:32 crc kubenswrapper[4943]: I1129 06:57:32.613706 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:57:32 crc kubenswrapper[4943]: I1129 06:57:32.614265 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.120996 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"]
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.122757 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.125080 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-645zp"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.138264 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"]
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.168856 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-util\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.169091 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2xt9\" (UniqueName: \"kubernetes.io/projected/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-kube-api-access-j2xt9\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.169206 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-bundle\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.270539 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2xt9\" (UniqueName: \"kubernetes.io/projected/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-kube-api-access-j2xt9\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.270609 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-bundle\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.270645 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-util\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.271093 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-util\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.271374 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-bundle\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.294740 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2xt9\" (UniqueName: \"kubernetes.io/projected/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-kube-api-access-j2xt9\") pod \"e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") " pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.437828 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:57:54 crc kubenswrapper[4943]: I1129 06:57:54.627340 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"]
Nov 29 06:57:55 crc kubenswrapper[4943]: I1129 06:57:55.266695 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq" event={"ID":"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d","Type":"ContainerStarted","Data":"9b6da8142e810317c505b8e1ec360f1a3edc1cfef7109bee3221b72baad837ba"}
Nov 29 06:57:55 crc kubenswrapper[4943]: I1129 06:57:55.266741 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq" event={"ID":"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d","Type":"ContainerStarted","Data":"b7b0029803097dc081d9eafacdc671904cf01469a02496e73ce6250b5cca3a36"}
Nov 29 06:57:56 crc kubenswrapper[4943]: I1129 06:57:56.275615 4943 generic.go:334] "Generic (PLEG): container finished" podID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerID="9b6da8142e810317c505b8e1ec360f1a3edc1cfef7109bee3221b72baad837ba" exitCode=0
Nov 29 06:57:56 crc kubenswrapper[4943]: I1129 06:57:56.275685 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq" event={"ID":"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d","Type":"ContainerDied","Data":"9b6da8142e810317c505b8e1ec360f1a3edc1cfef7109bee3221b72baad837ba"}
Nov 29 06:58:02 crc kubenswrapper[4943]: I1129 06:58:02.613717 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:58:02 crc kubenswrapper[4943]: I1129 06:58:02.614647 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:58:19 crc kubenswrapper[4943]: E1129 06:58:19.041894 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator-bundle:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:58:19 crc kubenswrapper[4943]: E1129 06:58:19.042467 4943 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator-bundle:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:58:19 crc kubenswrapper[4943]: E1129 06:58:19.042667 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:pull,Image:38.102.83.113:5001/openstack-k8s-operators/openstack-operator-bundle:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j2xt9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq_openstack-operators(0d6d1cb4-1e63-4910-9dbc-166a3a8c331d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 06:58:19 crc kubenswrapper[4943]: E1129 06:58:19.044710 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d"
Nov 29 06:58:19 crc kubenswrapper[4943]: E1129 06:58:19.456300 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.113:5001/openstack-k8s-operators/openstack-operator-bundle:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1\\\"\"" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d"
Nov 29 06:58:21 crc kubenswrapper[4943]: I1129 06:58:21.669512 4943 scope.go:117] "RemoveContainer" containerID="1c9915821b74531522fe1bda5d4845d4f509594ffc5c322b889de336043c5fa1"
Nov 29 06:58:32 crc kubenswrapper[4943]: I1129 06:58:32.613357 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 06:58:32 crc kubenswrapper[4943]: I1129 06:58:32.614762 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 06:58:32 crc kubenswrapper[4943]: I1129 06:58:32.614825 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7"
Nov 29 06:58:32 crc kubenswrapper[4943]: I1129 06:58:32.615486 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6bdc0d48c9b1a39aaaa8872104ad9af5eda5e1f91200620e07605efda9f99245"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 06:58:32 crc kubenswrapper[4943]: I1129 06:58:32.615555 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://6bdc0d48c9b1a39aaaa8872104ad9af5eda5e1f91200620e07605efda9f99245" gracePeriod=600
Nov 29 06:58:33 crc kubenswrapper[4943]: I1129 06:58:33.536343 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="6bdc0d48c9b1a39aaaa8872104ad9af5eda5e1f91200620e07605efda9f99245" exitCode=0
Nov 29 06:58:33 crc kubenswrapper[4943]: I1129 06:58:33.536396 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"6bdc0d48c9b1a39aaaa8872104ad9af5eda5e1f91200620e07605efda9f99245"}
Nov 29 06:58:33 crc kubenswrapper[4943]: I1129 06:58:33.536432 4943 scope.go:117] "RemoveContainer" containerID="feaa2a1b84b1a23a15dcc59525a52a97d91a9f2f8df5c4a63426effa07b9b188"
Nov 29 06:58:35 crc kubenswrapper[4943]: I1129 06:58:35.562422 4943 generic.go:334] "Generic (PLEG): container finished" podID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerID="4a20b89a25896c4879d22cb71e600a312ed86005faaf9dac44bd423d7acf376a" exitCode=0
Nov 29 06:58:35 crc kubenswrapper[4943]: I1129 06:58:35.562615 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq" event={"ID":"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d","Type":"ContainerDied","Data":"4a20b89a25896c4879d22cb71e600a312ed86005faaf9dac44bd423d7acf376a"}
Nov 29 06:58:35 crc kubenswrapper[4943]: I1129 06:58:35.567176 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"}
Nov 29 06:58:36 crc kubenswrapper[4943]: I1129 06:58:36.583486 4943 generic.go:334] "Generic (PLEG): container finished" podID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerID="d906d95a2cee2879a7bec1ab9b2bb932b39c4cb7164fbf60c93fae33c759e759" exitCode=0
Nov 29 06:58:36 crc kubenswrapper[4943]: I1129 06:58:36.583595 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq" event={"ID":"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d","Type":"ContainerDied","Data":"d906d95a2cee2879a7bec1ab9b2bb932b39c4cb7164fbf60c93fae33c759e759"}
Nov 29 06:58:37 crc kubenswrapper[4943]: I1129 06:58:37.854314 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:58:37 crc kubenswrapper[4943]: I1129 06:58:37.961818 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-bundle\") pod \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") "
Nov 29 06:58:37 crc kubenswrapper[4943]: I1129 06:58:37.961921 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2xt9\" (UniqueName: \"kubernetes.io/projected/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-kube-api-access-j2xt9\") pod \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") "
Nov 29 06:58:37 crc kubenswrapper[4943]: I1129 06:58:37.961957 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-util\") pod \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\" (UID: \"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d\") "
Nov 29 06:58:37 crc kubenswrapper[4943]: I1129 06:58:37.963174 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-bundle" (OuterVolumeSpecName: "bundle") pod "0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" (UID: "0d6d1cb4-1e63-4910-9dbc-166a3a8c331d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 06:58:37 crc kubenswrapper[4943]: I1129 06:58:37.969286 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-kube-api-access-j2xt9" (OuterVolumeSpecName: "kube-api-access-j2xt9") pod "0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" (UID: "0d6d1cb4-1e63-4910-9dbc-166a3a8c331d"). InnerVolumeSpecName "kube-api-access-j2xt9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:58:37 crc kubenswrapper[4943]: I1129 06:58:37.977170 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-util" (OuterVolumeSpecName: "util") pod "0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" (UID: "0d6d1cb4-1e63-4910-9dbc-166a3a8c331d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 06:58:38 crc kubenswrapper[4943]: I1129 06:58:38.064359 4943 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 06:58:38 crc kubenswrapper[4943]: I1129 06:58:38.064405 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2xt9\" (UniqueName: \"kubernetes.io/projected/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-kube-api-access-j2xt9\") on node \"crc\" DevicePath \"\""
Nov 29 06:58:38 crc kubenswrapper[4943]: I1129 06:58:38.064421 4943 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d6d1cb4-1e63-4910-9dbc-166a3a8c331d-util\") on node \"crc\" DevicePath \"\""
Nov 29 06:58:38 crc kubenswrapper[4943]: I1129 06:58:38.597741 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq" event={"ID":"0d6d1cb4-1e63-4910-9dbc-166a3a8c331d","Type":"ContainerDied","Data":"b7b0029803097dc081d9eafacdc671904cf01469a02496e73ce6250b5cca3a36"}
Nov 29 06:58:38 crc kubenswrapper[4943]: I1129 06:58:38.597805 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq"
Nov 29 06:58:38 crc kubenswrapper[4943]: I1129 06:58:38.597813 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7b0029803097dc081d9eafacdc671904cf01469a02496e73ce6250b5cca3a36"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.159793 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"]
Nov 29 06:58:42 crc kubenswrapper[4943]: E1129 06:58:42.160676 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerName="extract"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.160693 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerName="extract"
Nov 29 06:58:42 crc kubenswrapper[4943]: E1129 06:58:42.160706 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerName="util"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.160714 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerName="util"
Nov 29 06:58:42 crc kubenswrapper[4943]: E1129 06:58:42.160750 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerName="pull"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.160759 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerName="pull"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.160895 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d6d1cb4-1e63-4910-9dbc-166a3a8c331d" containerName="extract"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.161436 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.175925 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-4lvb5"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.212331 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"]
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.223170 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rk8q\" (UniqueName: \"kubernetes.io/projected/0215659c-bbb4-43cd-a403-ecaa380a2224-kube-api-access-9rk8q\") pod \"openstack-operator-controller-operator-56cc9fd6f6-q77pg\" (UID: \"0215659c-bbb4-43cd-a403-ecaa380a2224\") " pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.324444 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rk8q\" (UniqueName: \"kubernetes.io/projected/0215659c-bbb4-43cd-a403-ecaa380a2224-kube-api-access-9rk8q\") pod \"openstack-operator-controller-operator-56cc9fd6f6-q77pg\" (UID: \"0215659c-bbb4-43cd-a403-ecaa380a2224\") " pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.359881 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rk8q\" (UniqueName: \"kubernetes.io/projected/0215659c-bbb4-43cd-a403-ecaa380a2224-kube-api-access-9rk8q\") pod \"openstack-operator-controller-operator-56cc9fd6f6-q77pg\" (UID: \"0215659c-bbb4-43cd-a403-ecaa380a2224\") " pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.496871 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"
Nov 29 06:58:42 crc kubenswrapper[4943]: I1129 06:58:42.996525 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"]
Nov 29 06:58:43 crc kubenswrapper[4943]: I1129 06:58:43.632061 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg" event={"ID":"0215659c-bbb4-43cd-a403-ecaa380a2224","Type":"ContainerStarted","Data":"a901ee523ad5634c2ce8f96f616f5660b4d057cfc9bbca2fe0bb0364bfb65dd7"}
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.544950 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xpqzt"]
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.547932 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.560837 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xpqzt"]
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.640504 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-utilities\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.640595 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvdp7\" (UniqueName: \"kubernetes.io/projected/929bbef2-6452-4572-b7d9-c0ad516f4045-kube-api-access-wvdp7\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.640823 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-catalog-content\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.742068 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-catalog-content\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.742154 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-utilities\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.742187 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvdp7\" (UniqueName: \"kubernetes.io/projected/929bbef2-6452-4572-b7d9-c0ad516f4045-kube-api-access-wvdp7\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.742661 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-utilities\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.742708 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-catalog-content\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.760857 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvdp7\" (UniqueName: \"kubernetes.io/projected/929bbef2-6452-4572-b7d9-c0ad516f4045-kube-api-access-wvdp7\") pod \"redhat-operators-xpqzt\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") " pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:02 crc kubenswrapper[4943]: I1129 06:59:02.880305 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:06 crc kubenswrapper[4943]: E1129 06:59:06.952792 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:59:06 crc kubenswrapper[4943]: E1129 06:59:06.953595 4943 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.113:5001/openstack-k8s-operators/openstack-operator:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1"
Nov 29 06:59:06 crc kubenswrapper[4943]: E1129 06:59:06.954499 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:38.102.83.113:5001/openstack-k8s-operators/openstack-operator:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1,Command:[/operator],Args:[--leader-elect --health-probe-bind-address=:8081],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/pod
ified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-pod
ified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-po
dified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:TEST_TOBIKO_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tobiko:current-podified,ValueFrom:nil,},EnvVar{Name:TEST_ANSIBLETEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ansible-tests:current-podified,ValueFrom:nil,},EnvVar{Name:TEST_HORIZONTEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizontest:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY,Value:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/barbican-operator@sha256:f6059a0fbf031d34dcf086d14ce8c0546caeaee23c5780e90b5037c5feee9fea,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_OPERATOR_MANAGER_IMAGE_URL,Value:38.102.83.113:5001/openstack-k8s-operators/cinder-operator:019d68401352315d2d18b500118e2eb31a07fb26,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/designate-operator@sha256:9f68d7bc8c6bce38f46dee8a8272d5365c49fe7b32b2af52e8ac884e212f3a85,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/glance-operator@sha256:440cde33d3a2a0c545cd1c110a3634eb85544370f448865b97a13c38034b0172,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/infra-operator@sha256:09a6d0613ee2d3c1c809fc36c22678458ac271e0da87c970aec0a5339f5423f7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/ironic-operator@sha256:0f523b7e2fa9e86fef986acf07d0c42d5658c475d565f11eaea926ebffcb6530,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/keystone-operator@sha256:986861e5a0a9954f63581d9d55a30f8057883cefe
a489415d76257774526eea3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/manila-operator@sha256:ecf7be921850bdc04697ed1b332bab39ad2a64e4e45c2a445c04f9bae6ac61b5,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_BAREMETAL_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:14cfad6ea2e7f7ecc4cb2aafceb9c61514b3d04b66668832d1e4ac3b19f1ab81,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_CLUSTER_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TELEMETRY_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_OPERATOR_MANAGER_IMAGE_URL,Value:quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621,ValueFrom:nil,},EnvVar{Name:OPENSTACK_RELEASE_VERSION,Value:0.5.0-1764397625,ValueFrom:nil,},EnvVar{Name:OPERATOR_IMAGE_URL,Value:38.102.83.113:5001/openstack-k8s-operators/openstack-operator:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:openstack-operator.v0.5.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{268435456 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{134217728 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9rk8q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-operator-controller-operator-56cc9fd6f6-q77pg_openstack-operators(0215659c-bbb4-43cd-a403-ecaa380a2224): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 06:59:06 crc kubenswrapper[4943]: E1129 06:59:06.956279 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg" podUID="0215659c-bbb4-43cd-a403-ecaa380a2224"
Nov 29 06:59:07 crc kubenswrapper[4943]: I1129 06:59:07.143313 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xpqzt"]
Nov 29 06:59:07 crc kubenswrapper[4943]: I1129 06:59:07.780364 4943 generic.go:334] "Generic (PLEG): container finished" podID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerID="1bb21d7cbce445406218428f6cc56279030ad1ed4150cb9eaa7b95686468131d" exitCode=0
Nov 29 06:59:07 crc kubenswrapper[4943]: I1129 06:59:07.780475 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpqzt" event={"ID":"929bbef2-6452-4572-b7d9-c0ad516f4045","Type":"ContainerDied","Data":"1bb21d7cbce445406218428f6cc56279030ad1ed4150cb9eaa7b95686468131d"}
Nov 29 06:59:07 crc kubenswrapper[4943]: I1129 06:59:07.780956 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpqzt" event={"ID":"929bbef2-6452-4572-b7d9-c0ad516f4045","Type":"ContainerStarted","Data":"6a910c28b5d4cdf4e9e756e7e0374f868da24f0b58d5b49999f877397eb034ee"}
Nov 29 06:59:07 crc kubenswrapper[4943]: E1129 06:59:07.782469 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.113:5001/openstack-k8s-operators/openstack-operator:3aa569ba0ab6c593fcdf83fcb2a7a1f3431918f1\\\"\"" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg" podUID="0215659c-bbb4-43cd-a403-ecaa380a2224"
Nov 29 06:59:09 crc kubenswrapper[4943]: I1129 06:59:09.795867 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpqzt" event={"ID":"929bbef2-6452-4572-b7d9-c0ad516f4045","Type":"ContainerStarted","Data":"7dcbbb856977eaed61f2daea7f09065917c55c432e9aa5544676eb60056e58a2"}
Nov 29 06:59:11 crc kubenswrapper[4943]: I1129 06:59:11.810120 4943 generic.go:334] "Generic (PLEG): container finished" podID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerID="7dcbbb856977eaed61f2daea7f09065917c55c432e9aa5544676eb60056e58a2" exitCode=0
Nov 29 06:59:11 crc kubenswrapper[4943]: I1129 06:59:11.810257 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpqzt" event={"ID":"929bbef2-6452-4572-b7d9-c0ad516f4045","Type":"ContainerDied","Data":"7dcbbb856977eaed61f2daea7f09065917c55c432e9aa5544676eb60056e58a2"}
Nov 29 06:59:25 crc kubenswrapper[4943]: I1129 06:59:25.898670 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpqzt" event={"ID":"929bbef2-6452-4572-b7d9-c0ad516f4045","Type":"ContainerStarted","Data":"204e5e196c0b2916515c81f05ec4494ff5b4c16c2769de11cf90a4bdf5222657"}
Nov 29 06:59:25 crc kubenswrapper[4943]: I1129 06:59:25.918663 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xpqzt" podStartSLOduration=6.759234458 podStartE2EDuration="23.91864612s" podCreationTimestamp="2025-11-29 06:59:02 +0000 UTC" firstStartedPulling="2025-11-29 06:59:07.781550052 +0000 UTC m=+1522.711638805" lastFinishedPulling="2025-11-29 06:59:24.940961704 +0000 UTC m=+1539.871050467" observedRunningTime="2025-11-29 06:59:25.915495293 +0000 UTC m=+1540.845584046" watchObservedRunningTime="2025-11-29 06:59:25.91864612 +0000 UTC m=+1540.848734873"
Nov 29 06:59:26 crc kubenswrapper[4943]: I1129 06:59:26.910951 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg" event={"ID":"0215659c-bbb4-43cd-a403-ecaa380a2224","Type":"ContainerStarted","Data":"64bc269654e6fd699b717edf8369848c95e1259f679536190267d4f15041ccd1"}
Nov 29 06:59:26 crc kubenswrapper[4943]: I1129 06:59:26.911172 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"
Nov 29 06:59:26 crc kubenswrapper[4943]: I1129 06:59:26.945703 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg" podStartSLOduration=1.968824613 podStartE2EDuration="44.945678705s" podCreationTimestamp="2025-11-29 06:58:42 +0000 UTC" firstStartedPulling="2025-11-29 06:58:43.013628689 +0000 UTC m=+1497.943717452" lastFinishedPulling="2025-11-29 06:59:25.990482791 +0000 UTC m=+1540.920571544" observedRunningTime="2025-11-29 06:59:26.941224376 +0000 UTC m=+1541.871313149" watchObservedRunningTime="2025-11-29 06:59:26.945678705 +0000 UTC m=+1541.875767458"
Nov 29 06:59:32 crc kubenswrapper[4943]: I1129 06:59:32.500395 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-56cc9fd6f6-q77pg"
Nov 29 06:59:32 crc kubenswrapper[4943]: I1129 06:59:32.881036 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:32 crc kubenswrapper[4943]: I1129 06:59:32.881179 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:32 crc kubenswrapper[4943]: I1129 06:59:32.924684 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:32 crc kubenswrapper[4943]: I1129 06:59:32.988173 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:33 crc kubenswrapper[4943]: I1129 06:59:33.158814 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xpqzt"]
Nov 29 06:59:34 crc kubenswrapper[4943]: I1129 06:59:34.964647 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xpqzt" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerName="registry-server" containerID="cri-o://204e5e196c0b2916515c81f05ec4494ff5b4c16c2769de11cf90a4bdf5222657" gracePeriod=2
Nov 29 06:59:35 crc kubenswrapper[4943]: I1129 06:59:35.972934 4943 generic.go:334] "Generic (PLEG): container finished" podID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerID="204e5e196c0b2916515c81f05ec4494ff5b4c16c2769de11cf90a4bdf5222657" exitCode=0
Nov 29 06:59:35 crc kubenswrapper[4943]: I1129 06:59:35.973010 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpqzt" event={"ID":"929bbef2-6452-4572-b7d9-c0ad516f4045","Type":"ContainerDied","Data":"204e5e196c0b2916515c81f05ec4494ff5b4c16c2769de11cf90a4bdf5222657"}
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.325146 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.435426 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvdp7\" (UniqueName: \"kubernetes.io/projected/929bbef2-6452-4572-b7d9-c0ad516f4045-kube-api-access-wvdp7\") pod \"929bbef2-6452-4572-b7d9-c0ad516f4045\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") "
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.435565 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-catalog-content\") pod \"929bbef2-6452-4572-b7d9-c0ad516f4045\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") "
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.435629 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-utilities\") pod \"929bbef2-6452-4572-b7d9-c0ad516f4045\" (UID: \"929bbef2-6452-4572-b7d9-c0ad516f4045\") "
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.436676 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-utilities" (OuterVolumeSpecName: "utilities") pod "929bbef2-6452-4572-b7d9-c0ad516f4045" (UID: "929bbef2-6452-4572-b7d9-c0ad516f4045"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.442526 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/929bbef2-6452-4572-b7d9-c0ad516f4045-kube-api-access-wvdp7" (OuterVolumeSpecName: "kube-api-access-wvdp7") pod "929bbef2-6452-4572-b7d9-c0ad516f4045" (UID: "929bbef2-6452-4572-b7d9-c0ad516f4045"). InnerVolumeSpecName "kube-api-access-wvdp7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.537918 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.537952 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvdp7\" (UniqueName: \"kubernetes.io/projected/929bbef2-6452-4572-b7d9-c0ad516f4045-kube-api-access-wvdp7\") on node \"crc\" DevicePath \"\""
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.551808 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "929bbef2-6452-4572-b7d9-c0ad516f4045" (UID: "929bbef2-6452-4572-b7d9-c0ad516f4045"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.639877 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929bbef2-6452-4572-b7d9-c0ad516f4045-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.996833 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpqzt" event={"ID":"929bbef2-6452-4572-b7d9-c0ad516f4045","Type":"ContainerDied","Data":"6a910c28b5d4cdf4e9e756e7e0374f868da24f0b58d5b49999f877397eb034ee"}
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.996897 4943 scope.go:117] "RemoveContainer" containerID="204e5e196c0b2916515c81f05ec4494ff5b4c16c2769de11cf90a4bdf5222657"
Nov 29 06:59:38 crc kubenswrapper[4943]: I1129 06:59:38.997048 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xpqzt"
Nov 29 06:59:39 crc kubenswrapper[4943]: I1129 06:59:39.021550 4943 scope.go:117] "RemoveContainer" containerID="7dcbbb856977eaed61f2daea7f09065917c55c432e9aa5544676eb60056e58a2"
Nov 29 06:59:39 crc kubenswrapper[4943]: I1129 06:59:39.038727 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xpqzt"]
Nov 29 06:59:39 crc kubenswrapper[4943]: I1129 06:59:39.045503 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xpqzt"]
Nov 29 06:59:39 crc kubenswrapper[4943]: I1129 06:59:39.054986 4943 scope.go:117] "RemoveContainer" containerID="1bb21d7cbce445406218428f6cc56279030ad1ed4150cb9eaa7b95686468131d"
Nov 29 06:59:39 crc kubenswrapper[4943]: I1129 06:59:39.336072 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" path="/var/lib/kubelet/pods/929bbef2-6452-4572-b7d9-c0ad516f4045/volumes"
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.989442 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz"]
Nov 29 06:59:51 crc kubenswrapper[4943]: E1129 06:59:51.990759 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerName="extract-utilities"
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.990783 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerName="extract-utilities"
Nov 29 06:59:51 crc kubenswrapper[4943]: E1129 06:59:51.990807 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerName="registry-server"
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.990815 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerName="registry-server"
Nov 29 06:59:51 crc kubenswrapper[4943]: E1129 06:59:51.990838 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerName="extract-content"
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.990847 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerName="extract-content"
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.991011 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="929bbef2-6452-4572-b7d9-c0ad516f4045" containerName="registry-server"
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.991970 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz"
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.994028 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-5767c"
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.997207 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz"]
Nov 29 06:59:51 crc kubenswrapper[4943]: I1129 06:59:51.998410 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.000025 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-tzlqv" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.010848 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.018463 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.019847 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.022361 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-lmvdt" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.038472 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.055809 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.073671 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.074864 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.080390 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-475tk" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.081172 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.086730 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.087735 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.094477 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-ml8wp" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.095273 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.125485 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.127034 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.129634 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-jzqmj" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.160041 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.164971 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.165945 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.170348 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.170554 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-4jc4t" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182074 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182327 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfxrc\" (UniqueName: \"kubernetes.io/projected/5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55-kube-api-access-xfxrc\") pod \"designate-operator-controller-manager-78b4bc895b-ncg7s\" (UID: \"5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182373 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcwkk\" (UniqueName: \"kubernetes.io/projected/b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7-kube-api-access-jcwkk\") pod \"horizon-operator-controller-manager-68c6d99b8f-6hb4z\" (UID: \"b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182397 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182417 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqhnw\" (UniqueName: \"kubernetes.io/projected/c9815594-e69f-411a-9bf1-b0c064eb5180-kube-api-access-xqhnw\") pod \"barbican-operator-controller-manager-7d9dfd778-94jbz\" (UID: \"c9815594-e69f-411a-9bf1-b0c064eb5180\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182443 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-pvp2f\" (UniqueName: \"kubernetes.io/projected/0ec91d76-10f1-458f-b999-6212f13f5e18-kube-api-access-pvp2f\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182473 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhtgw\" (UniqueName: \"kubernetes.io/projected/d7ba2d7b-5840-4cc7-95d9-1953d8c0250b-kube-api-access-dhtgw\") pod \"heat-operator-controller-manager-5f64f6f8bb-rp427\" (UID: \"d7ba2d7b-5840-4cc7-95d9-1953d8c0250b\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182541 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bt52k\" (UniqueName: \"kubernetes.io/projected/5cfbb650-258a-4cf5-8ada-c721fa5aee9a-kube-api-access-bt52k\") pod \"cinder-operator-controller-manager-546795bfb5-9dvgz\" (UID: \"5cfbb650-258a-4cf5-8ada-c721fa5aee9a\") " pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.182583 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nrqq\" (UniqueName: \"kubernetes.io/projected/868d0e27-4b0a-4cb0-a2a6-13d58e257c8f-kube-api-access-2nrqq\") pod \"glance-operator-controller-manager-668d9c48b9-knfbt\" (UID: \"868d0e27-4b0a-4cb0-a2a6-13d58e257c8f\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.183102 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.189235 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.197776 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-5ghz4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.198457 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.208475 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.212034 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.213758 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-mtdk9" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.244719 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.270559 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.271744 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.274195 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-p7d6b" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.285913 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bt52k\" (UniqueName: \"kubernetes.io/projected/5cfbb650-258a-4cf5-8ada-c721fa5aee9a-kube-api-access-bt52k\") pod \"cinder-operator-controller-manager-546795bfb5-9dvgz\" (UID: \"5cfbb650-258a-4cf5-8ada-c721fa5aee9a\") " pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.285947 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nrqq\" (UniqueName: \"kubernetes.io/projected/868d0e27-4b0a-4cb0-a2a6-13d58e257c8f-kube-api-access-2nrqq\") pod \"glance-operator-controller-manager-668d9c48b9-knfbt\" (UID: \"868d0e27-4b0a-4cb0-a2a6-13d58e257c8f\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.285993 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfxrc\" (UniqueName: \"kubernetes.io/projected/5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55-kube-api-access-xfxrc\") pod \"designate-operator-controller-manager-78b4bc895b-ncg7s\" (UID: \"5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.286012 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcwkk\" (UniqueName: \"kubernetes.io/projected/b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7-kube-api-access-jcwkk\") pod \"horizon-operator-controller-manager-68c6d99b8f-6hb4z\" (UID: \"b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.286030 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " 
pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.286186 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqhnw\" (UniqueName: \"kubernetes.io/projected/c9815594-e69f-411a-9bf1-b0c064eb5180-kube-api-access-xqhnw\") pod \"barbican-operator-controller-manager-7d9dfd778-94jbz\" (UID: \"c9815594-e69f-411a-9bf1-b0c064eb5180\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.286213 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvp2f\" (UniqueName: \"kubernetes.io/projected/0ec91d76-10f1-458f-b999-6212f13f5e18-kube-api-access-pvp2f\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.286244 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhtgw\" (UniqueName: \"kubernetes.io/projected/d7ba2d7b-5840-4cc7-95d9-1953d8c0250b-kube-api-access-dhtgw\") pod \"heat-operator-controller-manager-5f64f6f8bb-rp427\" (UID: \"d7ba2d7b-5840-4cc7-95d9-1953d8c0250b\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.286771 4943 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.286958 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert podName:0ec91d76-10f1-458f-b999-6212f13f5e18 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:52.786803546 +0000 UTC m=+1567.716892299 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert") pod "infra-operator-controller-manager-57548d458d-kg4v7" (UID: "0ec91d76-10f1-458f-b999-6212f13f5e18") : secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.293998 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.295604 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.299833 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-r4ph8" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.322504 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.331482 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvp2f\" (UniqueName: \"kubernetes.io/projected/0ec91d76-10f1-458f-b999-6212f13f5e18-kube-api-access-pvp2f\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.332849 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhtgw\" (UniqueName: \"kubernetes.io/projected/d7ba2d7b-5840-4cc7-95d9-1953d8c0250b-kube-api-access-dhtgw\") pod \"heat-operator-controller-manager-5f64f6f8bb-rp427\" (UID: \"d7ba2d7b-5840-4cc7-95d9-1953d8c0250b\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.338525 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfxrc\" (UniqueName: \"kubernetes.io/projected/5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55-kube-api-access-xfxrc\") pod \"designate-operator-controller-manager-78b4bc895b-ncg7s\" (UID: \"5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.341670 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bt52k\" (UniqueName: \"kubernetes.io/projected/5cfbb650-258a-4cf5-8ada-c721fa5aee9a-kube-api-access-bt52k\") pod \"cinder-operator-controller-manager-546795bfb5-9dvgz\" (UID: \"5cfbb650-258a-4cf5-8ada-c721fa5aee9a\") " pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.341967 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.345206 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqhnw\" (UniqueName: \"kubernetes.io/projected/c9815594-e69f-411a-9bf1-b0c064eb5180-kube-api-access-xqhnw\") pod \"barbican-operator-controller-manager-7d9dfd778-94jbz\" (UID: \"c9815594-e69f-411a-9bf1-b0c064eb5180\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.349936 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcwkk\" (UniqueName: \"kubernetes.io/projected/b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7-kube-api-access-jcwkk\") pod \"horizon-operator-controller-manager-68c6d99b8f-6hb4z\" (UID: \"b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.354265 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.355258 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nrqq\" (UniqueName: \"kubernetes.io/projected/868d0e27-4b0a-4cb0-a2a6-13d58e257c8f-kube-api-access-2nrqq\") pod \"glance-operator-controller-manager-668d9c48b9-knfbt\" (UID: \"868d0e27-4b0a-4cb0-a2a6-13d58e257c8f\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.358073 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.366596 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.367979 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.368608 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.371859 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-8c87z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.377622 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-88mqd" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.384642 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.385839 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.387057 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q66x7\" (UniqueName: \"kubernetes.io/projected/c35fc2d0-5f79-4edf-86ec-d6f1add18551-kube-api-access-q66x7\") pod \"manila-operator-controller-manager-6546668bfd-wchm4\" (UID: \"c35fc2d0-5f79-4edf-86ec-d6f1add18551\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.387116 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgz92\" (UniqueName: \"kubernetes.io/projected/0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f-kube-api-access-rgz92\") pod \"keystone-operator-controller-manager-546d4bdf48-v6d5z\" (UID: \"0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.387136 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twjl4\" (UniqueName: \"kubernetes.io/projected/ac03d0ce-8e35-479d-9f8d-06e05955d2ce-kube-api-access-twjl4\") pod \"ironic-operator-controller-manager-6c548fd776-zqc94\" (UID: \"ac03d0ce-8e35-479d-9f8d-06e05955d2ce\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.388043 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-bjh5b" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.389458 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.418416 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.431505 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.444171 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.447980 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.457607 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.467689 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.470483 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.472247 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-vkhmf" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.487991 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgz92\" (UniqueName: \"kubernetes.io/projected/0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f-kube-api-access-rgz92\") pod \"keystone-operator-controller-manager-546d4bdf48-v6d5z\" (UID: \"0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.488044 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twjl4\" (UniqueName: \"kubernetes.io/projected/ac03d0ce-8e35-479d-9f8d-06e05955d2ce-kube-api-access-twjl4\") pod \"ironic-operator-controller-manager-6c548fd776-zqc94\" (UID: \"ac03d0ce-8e35-479d-9f8d-06e05955d2ce\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.488115 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vzps\" (UniqueName: \"kubernetes.io/projected/980cdf9e-3763-4aca-a92a-1f4ca61c1501-kube-api-access-6vzps\") pod \"octavia-operator-controller-manager-998648c74-v2kbm\" (UID: \"980cdf9e-3763-4aca-a92a-1f4ca61c1501\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.488143 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqxtx\" (UniqueName: \"kubernetes.io/projected/74ea0cf0-4b53-4342-9ea4-c2e4db748104-kube-api-access-gqxtx\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-jq6kn\" (UID: \"74ea0cf0-4b53-4342-9ea4-c2e4db748104\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.488187 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzkx7\" (UniqueName: \"kubernetes.io/projected/b979c1f8-20ad-4694-98e5-674738e37f4c-kube-api-access-fzkx7\") pod \"mariadb-operator-controller-manager-56bbcc9d85-tg7s9\" (UID: \"b979c1f8-20ad-4694-98e5-674738e37f4c\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.488212 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49r79\" (UniqueName: \"kubernetes.io/projected/f98a9e94-a4bf-4980-9ab0-efd202b4ee30-kube-api-access-49r79\") pod \"nova-operator-controller-manager-697bc559fc-hrvxb\" (UID: \"f98a9e94-a4bf-4980-9ab0-efd202b4ee30\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.488269 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q66x7\" (UniqueName: \"kubernetes.io/projected/c35fc2d0-5f79-4edf-86ec-d6f1add18551-kube-api-access-q66x7\") pod \"manila-operator-controller-manager-6546668bfd-wchm4\" (UID: \"c35fc2d0-5f79-4edf-86ec-d6f1add18551\") " 
pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.501291 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.503412 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.505804 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-4vhv9" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.506005 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.513839 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.522347 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twjl4\" (UniqueName: \"kubernetes.io/projected/ac03d0ce-8e35-479d-9f8d-06e05955d2ce-kube-api-access-twjl4\") pod \"ironic-operator-controller-manager-6c548fd776-zqc94\" (UID: \"ac03d0ce-8e35-479d-9f8d-06e05955d2ce\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.522389 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgz92\" (UniqueName: \"kubernetes.io/projected/0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f-kube-api-access-rgz92\") pod \"keystone-operator-controller-manager-546d4bdf48-v6d5z\" (UID: \"0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.525797 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q66x7\" (UniqueName: \"kubernetes.io/projected/c35fc2d0-5f79-4edf-86ec-d6f1add18551-kube-api-access-q66x7\") pod \"manila-operator-controller-manager-6546668bfd-wchm4\" (UID: \"c35fc2d0-5f79-4edf-86ec-d6f1add18551\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.534518 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.536308 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.541857 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-72jmr" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.578354 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.580216 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.580646 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.590390 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vzps\" (UniqueName: \"kubernetes.io/projected/980cdf9e-3763-4aca-a92a-1f4ca61c1501-kube-api-access-6vzps\") pod \"octavia-operator-controller-manager-998648c74-v2kbm\" (UID: \"980cdf9e-3763-4aca-a92a-1f4ca61c1501\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.590431 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqxtx\" (UniqueName: \"kubernetes.io/projected/74ea0cf0-4b53-4342-9ea4-c2e4db748104-kube-api-access-gqxtx\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-jq6kn\" (UID: \"74ea0cf0-4b53-4342-9ea4-c2e4db748104\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.590469 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzkx7\" (UniqueName: \"kubernetes.io/projected/b979c1f8-20ad-4694-98e5-674738e37f4c-kube-api-access-fzkx7\") pod \"mariadb-operator-controller-manager-56bbcc9d85-tg7s9\" (UID: \"b979c1f8-20ad-4694-98e5-674738e37f4c\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.590489 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49r79\" (UniqueName: \"kubernetes.io/projected/f98a9e94-a4bf-4980-9ab0-efd202b4ee30-kube-api-access-49r79\") pod \"nova-operator-controller-manager-697bc559fc-hrvxb\" (UID: \"f98a9e94-a4bf-4980-9ab0-efd202b4ee30\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.590549 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8fdz\" (UniqueName: \"kubernetes.io/projected/d4923779-fef3-444b-9276-5ca610c71fd4-kube-api-access-l8fdz\") pod \"ovn-operator-controller-manager-b6456fdb6-bwq9n\" (UID: \"d4923779-fef3-444b-9276-5ca610c71fd4\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.613015 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.615154 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.616032 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.618918 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.619693 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-vs4zz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.620661 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vzps\" (UniqueName: \"kubernetes.io/projected/980cdf9e-3763-4aca-a92a-1f4ca61c1501-kube-api-access-6vzps\") pod \"octavia-operator-controller-manager-998648c74-v2kbm\" (UID: \"980cdf9e-3763-4aca-a92a-1f4ca61c1501\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.621252 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqxtx\" (UniqueName: \"kubernetes.io/projected/74ea0cf0-4b53-4342-9ea4-c2e4db748104-kube-api-access-gqxtx\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-jq6kn\" (UID: \"74ea0cf0-4b53-4342-9ea4-c2e4db748104\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.626032 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49r79\" (UniqueName: \"kubernetes.io/projected/f98a9e94-a4bf-4980-9ab0-efd202b4ee30-kube-api-access-49r79\") pod \"nova-operator-controller-manager-697bc559fc-hrvxb\" (UID: \"f98a9e94-a4bf-4980-9ab0-efd202b4ee30\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.626268 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.628098 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.632662 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzkx7\" (UniqueName: \"kubernetes.io/projected/b979c1f8-20ad-4694-98e5-674738e37f4c-kube-api-access-fzkx7\") pod \"mariadb-operator-controller-manager-56bbcc9d85-tg7s9\" (UID: \"b979c1f8-20ad-4694-98e5-674738e37f4c\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.645896 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.647224 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.649517 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-kg2dk" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.667453 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.693243 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8fdz\" (UniqueName: \"kubernetes.io/projected/d4923779-fef3-444b-9276-5ca610c71fd4-kube-api-access-l8fdz\") pod \"ovn-operator-controller-manager-b6456fdb6-bwq9n\" (UID: \"d4923779-fef3-444b-9276-5ca610c71fd4\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.693506 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf87c\" (UniqueName: \"kubernetes.io/projected/882924cc-0259-4c98-b40a-1c02eeadaa09-kube-api-access-wf87c\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.693664 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.693818 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cg2t5\" (UniqueName: \"kubernetes.io/projected/b60c121d-d0c2-4e2e-be92-6da802d74dd6-kube-api-access-cg2t5\") pod \"placement-operator-controller-manager-78f8948974-jgxw4\" (UID: \"b60c121d-d0c2-4e2e-be92-6da802d74dd6\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.698901 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-jsgws"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.699854 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.701734 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-dx4jk" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.711832 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8fdz\" (UniqueName: \"kubernetes.io/projected/d4923779-fef3-444b-9276-5ca610c71fd4-kube-api-access-l8fdz\") pod \"ovn-operator-controller-manager-b6456fdb6-bwq9n\" (UID: \"d4923779-fef3-444b-9276-5ca610c71fd4\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.714398 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-jsgws"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.763088 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.779303 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.793835 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.795019 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.795015 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npmxc\" (UniqueName: \"kubernetes.io/projected/c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0-kube-api-access-npmxc\") pod \"swift-operator-controller-manager-5f8c65bbfc-vbk9x\" (UID: \"c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.795828 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cg2t5\" (UniqueName: \"kubernetes.io/projected/b60c121d-d0c2-4e2e-be92-6da802d74dd6-kube-api-access-cg2t5\") pod \"placement-operator-controller-manager-78f8948974-jgxw4\" (UID: \"b60c121d-d0c2-4e2e-be92-6da802d74dd6\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.795926 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmsbj\" (UniqueName: \"kubernetes.io/projected/66d0d275-3a0b-45a1-8b57-dc7ec4559888-kube-api-access-cmsbj\") pod \"telemetry-operator-controller-manager-76cc84c6bb-ll829\" (UID: \"66d0d275-3a0b-45a1-8b57-dc7ec4559888\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.795980 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf87c\" (UniqueName: \"kubernetes.io/projected/882924cc-0259-4c98-b40a-1c02eeadaa09-kube-api-access-wf87c\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" 
(UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.796062 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.796103 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.796266 4943 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.796325 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert podName:0ec91d76-10f1-458f-b999-6212f13f5e18 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:53.796305064 +0000 UTC m=+1568.726393807 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert") pod "infra-operator-controller-manager-57548d458d-kg4v7" (UID: "0ec91d76-10f1-458f-b999-6212f13f5e18") : secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.796799 4943 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.796847 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert podName:882924cc-0259-4c98-b40a-1c02eeadaa09 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:53.296831517 +0000 UTC m=+1568.226920270 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" (UID: "882924cc-0259-4c98-b40a-1c02eeadaa09") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.799160 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-67zpt" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.812679 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.813585 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.814213 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.821072 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cg2t5\" (UniqueName: \"kubernetes.io/projected/b60c121d-d0c2-4e2e-be92-6da802d74dd6-kube-api-access-cg2t5\") pod \"placement-operator-controller-manager-78f8948974-jgxw4\" (UID: \"b60c121d-d0c2-4e2e-be92-6da802d74dd6\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.821408 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf87c\" (UniqueName: \"kubernetes.io/projected/882924cc-0259-4c98-b40a-1c02eeadaa09-kube-api-access-wf87c\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.840998 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.862109 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.863271 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.863727 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.868489 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.869015 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.869122 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-p6wnl" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.869175 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.897005 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.897255 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npmxc\" (UniqueName: \"kubernetes.io/projected/c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0-kube-api-access-npmxc\") pod \"swift-operator-controller-manager-5f8c65bbfc-vbk9x\" (UID: \"c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.897294 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pthf\" (UniqueName: \"kubernetes.io/projected/4602188b-0cc4-4b1e-80e8-a2f40fd43da0-kube-api-access-9pthf\") pod \"watcher-operator-controller-manager-769dc69bc-f4nk6\" (UID: \"4602188b-0cc4-4b1e-80e8-a2f40fd43da0\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.897339 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.897358 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmsbj\" (UniqueName: \"kubernetes.io/projected/66d0d275-3a0b-45a1-8b57-dc7ec4559888-kube-api-access-cmsbj\") pod \"telemetry-operator-controller-manager-76cc84c6bb-ll829\" (UID: \"66d0d275-3a0b-45a1-8b57-dc7ec4559888\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.897390 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.897411 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tghcn\" (UniqueName: \"kubernetes.io/projected/7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3-kube-api-access-tghcn\") pod \"test-operator-controller-manager-5854674fcc-jsgws\" (UID: \"7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.897499 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtkgq\" (UniqueName: \"kubernetes.io/projected/e75ee72e-f7fe-45a2-81f1-06ed23649f89-kube-api-access-wtkgq\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.919674 4943 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.920745 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.921107 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmsbj\" (UniqueName: \"kubernetes.io/projected/66d0d275-3a0b-45a1-8b57-dc7ec4559888-kube-api-access-cmsbj\") pod \"telemetry-operator-controller-manager-76cc84c6bb-ll829\" (UID: \"66d0d275-3a0b-45a1-8b57-dc7ec4559888\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.936646 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.936963 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-vkr4m" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.970757 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427"] Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.975802 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.976180 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npmxc\" (UniqueName: \"kubernetes.io/projected/c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0-kube-api-access-npmxc\") pod \"swift-operator-controller-manager-5f8c65bbfc-vbk9x\" (UID: \"c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.998370 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pthf\" (UniqueName: \"kubernetes.io/projected/4602188b-0cc4-4b1e-80e8-a2f40fd43da0-kube-api-access-9pthf\") pod \"watcher-operator-controller-manager-769dc69bc-f4nk6\" (UID: \"4602188b-0cc4-4b1e-80e8-a2f40fd43da0\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.998434 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.998470 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.998492 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tghcn\" 
(UniqueName: \"kubernetes.io/projected/7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3-kube-api-access-tghcn\") pod \"test-operator-controller-manager-5854674fcc-jsgws\" (UID: \"7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" Nov 29 06:59:52 crc kubenswrapper[4943]: I1129 06:59:52.998540 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtkgq\" (UniqueName: \"kubernetes.io/projected/e75ee72e-f7fe-45a2-81f1-06ed23649f89-kube-api-access-wtkgq\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.998819 4943 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.998857 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:53.498844246 +0000 UTC m=+1568.428932999 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "webhook-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.998938 4943 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 29 06:59:52 crc kubenswrapper[4943]: E1129 06:59:52.998957 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:53.498951298 +0000 UTC m=+1568.429040051 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "metrics-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.022690 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tghcn\" (UniqueName: \"kubernetes.io/projected/7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3-kube-api-access-tghcn\") pod \"test-operator-controller-manager-5854674fcc-jsgws\" (UID: \"7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.026444 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtkgq\" (UniqueName: \"kubernetes.io/projected/e75ee72e-f7fe-45a2-81f1-06ed23649f89-kube-api-access-wtkgq\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.028343 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pthf\" (UniqueName: \"kubernetes.io/projected/4602188b-0cc4-4b1e-80e8-a2f40fd43da0-kube-api-access-9pthf\") pod \"watcher-operator-controller-manager-769dc69bc-f4nk6\" (UID: \"4602188b-0cc4-4b1e-80e8-a2f40fd43da0\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.029730 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.100159 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbxtf\" (UniqueName: \"kubernetes.io/projected/83a6e478-8cc1-4061-b06f-e0b9faf51ede-kube-api-access-bbxtf\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fvhlh\" (UID: \"83a6e478-8cc1-4061-b06f-e0b9faf51ede\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.102695 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" event={"ID":"d7ba2d7b-5840-4cc7-95d9-1953d8c0250b","Type":"ContainerStarted","Data":"15d8a55970886dd065c2be80f3f6adb1424e099a10e29ce09472131b2de38fbd"} Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.131825 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z"] Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.136683 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s"] Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.139423 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" Nov 29 06:59:53 crc kubenswrapper[4943]: W1129 06:59:53.193817 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8f2b43c_dd75_44aa_b6b5_365ab01ec0e7.slice/crio-0ab38b6c1247498d916d352ac2b4f04eaf065bcf78921e12b363a6aebdd9b99a WatchSource:0}: Error finding container 0ab38b6c1247498d916d352ac2b4f04eaf065bcf78921e12b363a6aebdd9b99a: Status 404 returned error can't find the container with id 0ab38b6c1247498d916d352ac2b4f04eaf065bcf78921e12b363a6aebdd9b99a Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.201793 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbxtf\" (UniqueName: \"kubernetes.io/projected/83a6e478-8cc1-4061-b06f-e0b9faf51ede-kube-api-access-bbxtf\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fvhlh\" (UID: \"83a6e478-8cc1-4061-b06f-e0b9faf51ede\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" Nov 29 06:59:53 crc kubenswrapper[4943]: W1129 06:59:53.205817 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b3a7a05_9e00_4f3d_be4d_9536f7ee2e55.slice/crio-981bd420ed9f88d92e4ead270cd0aed58a0b4676a8387c28119bf1f14c5154cf WatchSource:0}: Error finding container 981bd420ed9f88d92e4ead270cd0aed58a0b4676a8387c28119bf1f14c5154cf: Status 404 returned error can't find the container with id 981bd420ed9f88d92e4ead270cd0aed58a0b4676a8387c28119bf1f14c5154cf Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.225096 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbxtf\" (UniqueName: \"kubernetes.io/projected/83a6e478-8cc1-4061-b06f-e0b9faf51ede-kube-api-access-bbxtf\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fvhlh\" (UID: \"83a6e478-8cc1-4061-b06f-e0b9faf51ede\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.251205 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.307505 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:53 crc kubenswrapper[4943]: E1129 06:59:53.307726 4943 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: E1129 06:59:53.307783 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert podName:882924cc-0259-4c98-b40a-1c02eeadaa09 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:54.30776414 +0000 UTC m=+1569.237852883 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" (UID: "882924cc-0259-4c98-b40a-1c02eeadaa09") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.312212 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.324024 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt"] Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.517470 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.517867 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:53 crc kubenswrapper[4943]: E1129 06:59:53.518058 4943 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: E1129 06:59:53.518113 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:54.518095473 +0000 UTC m=+1569.448184226 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "metrics-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: E1129 06:59:53.518425 4943 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: E1129 06:59:53.518488 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:54.518472092 +0000 UTC m=+1569.448560845 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "webhook-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.745619 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z"] Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.784042 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz"] Nov 29 06:59:53 crc kubenswrapper[4943]: W1129 06:59:53.810836 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5cfbb650_258a_4cf5_8ada_c721fa5aee9a.slice/crio-6afd1b0bc1688ca153c0696b07c57897fe3f66c39f34a433a013a3a81ff5b7d9 WatchSource:0}: Error finding container 6afd1b0bc1688ca153c0696b07c57897fe3f66c39f34a433a013a3a81ff5b7d9: Status 404 returned error can't find the container with id 6afd1b0bc1688ca153c0696b07c57897fe3f66c39f34a433a013a3a81ff5b7d9 Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.821834 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz"] Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.822473 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:53 crc kubenswrapper[4943]: E1129 06:59:53.822641 4943 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: E1129 06:59:53.822684 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert podName:0ec91d76-10f1-458f-b999-6212f13f5e18 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:55.822670259 +0000 UTC m=+1570.752759012 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert") pod "infra-operator-controller-manager-57548d458d-kg4v7" (UID: "0ec91d76-10f1-458f-b999-6212f13f5e18") : secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:53 crc kubenswrapper[4943]: W1129 06:59:53.879258 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9815594_e69f_411a_9bf1_b0c064eb5180.slice/crio-8367e9250a669dc2da5ed8e7b49634c2a317faec4e5a44996f52077bcf695b6f WatchSource:0}: Error finding container 8367e9250a669dc2da5ed8e7b49634c2a317faec4e5a44996f52077bcf695b6f: Status 404 returned error can't find the container with id 8367e9250a669dc2da5ed8e7b49634c2a317faec4e5a44996f52077bcf695b6f Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.906634 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4"] Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.930178 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9"] Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.946260 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4"] Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.963188 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb"] Nov 29 06:59:53 crc kubenswrapper[4943]: W1129 06:59:53.977129 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74ea0cf0_4b53_4342_9ea4_c2e4db748104.slice/crio-95c04c1db8714e5f8dd32a59ad68212d4f6ca7a12b0f74d6b92199ae758cb973 WatchSource:0}: Error finding container 95c04c1db8714e5f8dd32a59ad68212d4f6ca7a12b0f74d6b92199ae758cb973: Status 404 returned error can't find the container with id 95c04c1db8714e5f8dd32a59ad68212d4f6ca7a12b0f74d6b92199ae758cb973 Nov 29 06:59:53 crc kubenswrapper[4943]: I1129 06:59:53.981214 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn"] Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.041671 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n"] Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.060674 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm"] Nov 29 06:59:54 crc kubenswrapper[4943]: W1129 06:59:54.061141 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4923779_fef3_444b_9276_5ca610c71fd4.slice/crio-3c7c980e87399aaf6a2cc815e8fd1a08894835243d5638d71ab7c655c0946f13 WatchSource:0}: Error finding container 3c7c980e87399aaf6a2cc815e8fd1a08894835243d5638d71ab7c655c0946f13: Status 404 returned error can't find the container with id 3c7c980e87399aaf6a2cc815e8fd1a08894835243d5638d71ab7c655c0946f13 Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.065834 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94"] Nov 29 06:59:54 crc kubenswrapper[4943]: W1129 06:59:54.066102 4943 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e1a6ed4_d7ec_4703_8acb_16ea3f99d8f3.slice/crio-2b0668615920e7e5f90d28607593a688ca960f39ce89b29c970b83a9033894be WatchSource:0}: Error finding container 2b0668615920e7e5f90d28607593a688ca960f39ce89b29c970b83a9033894be: Status 404 returned error can't find the container with id 2b0668615920e7e5f90d28607593a688ca960f39ce89b29c970b83a9033894be Nov 29 06:59:54 crc kubenswrapper[4943]: W1129 06:59:54.067371 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac03d0ce_8e35_479d_9f8d_06e05955d2ce.slice/crio-de20c97647a412a86ccf2e651ed4d877b3d1ab70e7603daec3015b8b4e8ef1c8 WatchSource:0}: Error finding container de20c97647a412a86ccf2e651ed4d877b3d1ab70e7603daec3015b8b4e8ef1c8: Status 404 returned error can't find the container with id de20c97647a412a86ccf2e651ed4d877b3d1ab70e7603daec3015b8b4e8ef1c8 Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.071675 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829"] Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.083654 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cmsbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-ll829_openstack-operators(66d0d275-3a0b-45a1-8b57-dc7ec4559888): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.084381 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9pthf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-f4nk6_openstack-operators(4602188b-0cc4-4b1e-80e8-a2f40fd43da0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc 
kubenswrapper[4943]: E1129 06:59:54.084657 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vzps,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-v2kbm_openstack-operators(980cdf9e-3763-4aca-a92a-1f4ca61c1501): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.085763 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cmsbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-ll829_openstack-operators(66d0d275-3a0b-45a1-8b57-dc7ec4559888): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.088843 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" podUID="66d0d275-3a0b-45a1-8b57-dc7ec4559888" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.089102 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9pthf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-f4nk6_openstack-operators(4602188b-0cc4-4b1e-80e8-a2f40fd43da0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.089214 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vzps,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-v2kbm_openstack-operators(980cdf9e-3763-4aca-a92a-1f4ca61c1501): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.090484 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" podUID="980cdf9e-3763-4aca-a92a-1f4ca61c1501" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.090509 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" podUID="4602188b-0cc4-4b1e-80e8-a2f40fd43da0" Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.097002 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-jsgws"] Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.107816 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6"] Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.115397 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" event={"ID":"d4923779-fef3-444b-9276-5ca610c71fd4","Type":"ContainerStarted","Data":"3c7c980e87399aaf6a2cc815e8fd1a08894835243d5638d71ab7c655c0946f13"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.118484 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" event={"ID":"b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7","Type":"ContainerStarted","Data":"0ab38b6c1247498d916d352ac2b4f04eaf065bcf78921e12b363a6aebdd9b99a"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.119536 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" 
event={"ID":"5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55","Type":"ContainerStarted","Data":"981bd420ed9f88d92e4ead270cd0aed58a0b4676a8387c28119bf1f14c5154cf"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.121192 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" event={"ID":"66d0d275-3a0b-45a1-8b57-dc7ec4559888","Type":"ContainerStarted","Data":"da1d384d5c8384ffb5e63e5d3859137a9b1ee9e439017f4cd42a18bbcd4a0c3e"} Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.122809 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" podUID="66d0d275-3a0b-45a1-8b57-dc7ec4559888" Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.123204 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" event={"ID":"b979c1f8-20ad-4694-98e5-674738e37f4c","Type":"ContainerStarted","Data":"283cae49c2d1847a4bc3a9c845df9b8c97faa2c2aeb453baf9cd7826f5d178fe"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.125403 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" event={"ID":"c9815594-e69f-411a-9bf1-b0c064eb5180","Type":"ContainerStarted","Data":"8367e9250a669dc2da5ed8e7b49634c2a317faec4e5a44996f52077bcf695b6f"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.127125 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" event={"ID":"b60c121d-d0c2-4e2e-be92-6da802d74dd6","Type":"ContainerStarted","Data":"4e56aabc1cdd736962bc47463a75e9b30810be1d606941c72a3b9b28658971da"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.128752 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" event={"ID":"4602188b-0cc4-4b1e-80e8-a2f40fd43da0","Type":"ContainerStarted","Data":"3cc394aae920c298e90ac4e8ce112dcf45a6354b47062d63272ecdc1323c7a3a"} Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.130363 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" podUID="4602188b-0cc4-4b1e-80e8-a2f40fd43da0" Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.130982 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" event={"ID":"ac03d0ce-8e35-479d-9f8d-06e05955d2ce","Type":"ContainerStarted","Data":"de20c97647a412a86ccf2e651ed4d877b3d1ab70e7603daec3015b8b4e8ef1c8"} Nov 29 06:59:54 
crc kubenswrapper[4943]: I1129 06:59:54.131840 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" event={"ID":"868d0e27-4b0a-4cb0-a2a6-13d58e257c8f","Type":"ContainerStarted","Data":"34b7097ae9e4dfcf30dcdbb72a4a9c5da61481619792a780fc39659eeebace5f"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.132668 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" event={"ID":"0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f","Type":"ContainerStarted","Data":"3da9d1fd05bbf96584c5d135d2a0707bd61ffc45b46b02a5da11d97bcc99b4fc"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.133517 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" event={"ID":"74ea0cf0-4b53-4342-9ea4-c2e4db748104","Type":"ContainerStarted","Data":"95c04c1db8714e5f8dd32a59ad68212d4f6ca7a12b0f74d6b92199ae758cb973"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.137720 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" event={"ID":"7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3","Type":"ContainerStarted","Data":"2b0668615920e7e5f90d28607593a688ca960f39ce89b29c970b83a9033894be"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.141157 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" event={"ID":"980cdf9e-3763-4aca-a92a-1f4ca61c1501","Type":"ContainerStarted","Data":"17a5f4ca5374bc6fd1167bb0930faf3e24f4b687eb177875d11a7bebf11e6455"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.142743 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" event={"ID":"f98a9e94-a4bf-4980-9ab0-efd202b4ee30","Type":"ContainerStarted","Data":"f5f3c9261129a93d8fc2118f1208ffcd47afc6ccccf1c0b4038fe373236e4993"} Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.143016 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" podUID="980cdf9e-3763-4aca-a92a-1f4ca61c1501" Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.143966 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" event={"ID":"c35fc2d0-5f79-4edf-86ec-d6f1add18551","Type":"ContainerStarted","Data":"2afc1441185ce6c439bdd39c71039ae80c3c191499c9063d3b612fcd96e50a33"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.145250 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" event={"ID":"5cfbb650-258a-4cf5-8ada-c721fa5aee9a","Type":"ContainerStarted","Data":"6afd1b0bc1688ca153c0696b07c57897fe3f66c39f34a433a013a3a81ff5b7d9"} Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.177383 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh"] Nov 29 06:59:54 crc kubenswrapper[4943]: W1129 06:59:54.177923 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83a6e478_8cc1_4061_b06f_e0b9faf51ede.slice/crio-bafaf9e1e87bddfdd401836a0b7dfeca75451781d665ef21023071abcef9de09 WatchSource:0}: Error finding container bafaf9e1e87bddfdd401836a0b7dfeca75451781d665ef21023071abcef9de09: Status 404 returned error can't find the container with id bafaf9e1e87bddfdd401836a0b7dfeca75451781d665ef21023071abcef9de09 Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.180210 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bbxtf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-fvhlh_openstack-operators(83a6e478-8cc1-4061-b06f-e0b9faf51ede): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.181443 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" podUID="83a6e478-8cc1-4061-b06f-e0b9faf51ede" Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.214941 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x"] Nov 29 06:59:54 crc kubenswrapper[4943]: W1129 06:59:54.217839 4943 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8c7d6b1_e72c_4dae_ba96_1e87c2a54fa0.slice/crio-a2539614399aecb1a5ff4cefa1f7e729f34a53e287e17bd15769c5ad20eb48e0 WatchSource:0}: Error finding container a2539614399aecb1a5ff4cefa1f7e729f34a53e287e17bd15769c5ad20eb48e0: Status 404 returned error can't find the container with id a2539614399aecb1a5ff4cefa1f7e729f34a53e287e17bd15769c5ad20eb48e0 Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.219731 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-npmxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-vbk9x_openstack-operators(c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.222507 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} 
BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-npmxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-vbk9x_openstack-operators(c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.223785 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" podUID="c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0" Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.334005 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.334207 4943 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.334293 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert podName:882924cc-0259-4c98-b40a-1c02eeadaa09 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:56.334272159 +0000 UTC m=+1571.264360912 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" (UID: "882924cc-0259-4c98-b40a-1c02eeadaa09") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.535487 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:54 crc kubenswrapper[4943]: I1129 06:59:54.535833 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.535652 4943 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.536070 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:56.536052583 +0000 UTC m=+1571.466141336 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "webhook-server-cert" not found Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.535905 4943 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 29 06:59:54 crc kubenswrapper[4943]: E1129 06:59:54.536196 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:56.536188616 +0000 UTC m=+1571.466277369 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "metrics-server-cert" not found Nov 29 06:59:55 crc kubenswrapper[4943]: I1129 06:59:55.151215 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" event={"ID":"c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0","Type":"ContainerStarted","Data":"a2539614399aecb1a5ff4cefa1f7e729f34a53e287e17bd15769c5ad20eb48e0"} Nov 29 06:59:55 crc kubenswrapper[4943]: I1129 06:59:55.152154 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" event={"ID":"83a6e478-8cc1-4061-b06f-e0b9faf51ede","Type":"ContainerStarted","Data":"bafaf9e1e87bddfdd401836a0b7dfeca75451781d665ef21023071abcef9de09"} Nov 29 06:59:55 crc kubenswrapper[4943]: E1129 06:59:55.152898 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" podUID="c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0" Nov 29 06:59:55 crc kubenswrapper[4943]: E1129 06:59:55.153427 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" podUID="4602188b-0cc4-4b1e-80e8-a2f40fd43da0" Nov 29 06:59:55 crc kubenswrapper[4943]: E1129 06:59:55.153519 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" podUID="66d0d275-3a0b-45a1-8b57-dc7ec4559888" Nov 29 06:59:55 crc kubenswrapper[4943]: E1129 06:59:55.153599 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" podUID="83a6e478-8cc1-4061-b06f-e0b9faf51ede" Nov 29 06:59:55 crc kubenswrapper[4943]: E1129 06:59:55.153663 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" 
for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" podUID="980cdf9e-3763-4aca-a92a-1f4ca61c1501" Nov 29 06:59:55 crc kubenswrapper[4943]: I1129 06:59:55.852199 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:55 crc kubenswrapper[4943]: E1129 06:59:55.852381 4943 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:55 crc kubenswrapper[4943]: E1129 06:59:55.853056 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert podName:0ec91d76-10f1-458f-b999-6212f13f5e18 nodeName:}" failed. No retries permitted until 2025-11-29 06:59:59.853031772 +0000 UTC m=+1574.783120525 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert") pod "infra-operator-controller-manager-57548d458d-kg4v7" (UID: "0ec91d76-10f1-458f-b999-6212f13f5e18") : secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:56 crc kubenswrapper[4943]: E1129 06:59:56.161090 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" podUID="83a6e478-8cc1-4061-b06f-e0b9faf51ede" Nov 29 06:59:56 crc kubenswrapper[4943]: E1129 06:59:56.161369 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" podUID="c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0" Nov 29 06:59:56 crc kubenswrapper[4943]: I1129 06:59:56.359509 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 06:59:56 crc kubenswrapper[4943]: E1129 06:59:56.359671 4943 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret 
"openstack-baremetal-operator-webhook-server-cert" not found Nov 29 06:59:56 crc kubenswrapper[4943]: E1129 06:59:56.359731 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert podName:882924cc-0259-4c98-b40a-1c02eeadaa09 nodeName:}" failed. No retries permitted until 2025-11-29 07:00:00.359713401 +0000 UTC m=+1575.289802164 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" (UID: "882924cc-0259-4c98-b40a-1c02eeadaa09") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 06:59:56 crc kubenswrapper[4943]: I1129 06:59:56.563552 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:56 crc kubenswrapper[4943]: I1129 06:59:56.563643 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 06:59:56 crc kubenswrapper[4943]: E1129 06:59:56.563771 4943 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 06:59:56 crc kubenswrapper[4943]: E1129 06:59:56.563791 4943 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 29 06:59:56 crc kubenswrapper[4943]: E1129 06:59:56.563848 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 07:00:00.563829992 +0000 UTC m=+1575.493918745 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "webhook-server-cert" not found Nov 29 06:59:56 crc kubenswrapper[4943]: E1129 06:59:56.563865 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 07:00:00.563858793 +0000 UTC m=+1575.493947546 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "metrics-server-cert" not found Nov 29 06:59:59 crc kubenswrapper[4943]: I1129 06:59:59.912176 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 06:59:59 crc kubenswrapper[4943]: E1129 06:59:59.912384 4943 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 29 06:59:59 crc kubenswrapper[4943]: E1129 06:59:59.912477 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert podName:0ec91d76-10f1-458f-b999-6212f13f5e18 nodeName:}" failed. No retries permitted until 2025-11-29 07:00:07.912455887 +0000 UTC m=+1582.842544640 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert") pod "infra-operator-controller-manager-57548d458d-kg4v7" (UID: "0ec91d76-10f1-458f-b999-6212f13f5e18") : secret "infra-operator-webhook-server-cert" not found Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.144116 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm"] Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.145203 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.148900 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.149153 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.150462 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm"] Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.224513 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g9r2\" (UniqueName: \"kubernetes.io/projected/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-kube-api-access-9g9r2\") pod \"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.224785 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-config-volume\") pod \"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.224974 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-secret-volume\") pod \"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.326313 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-secret-volume\") pod \"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.326430 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g9r2\" (UniqueName: \"kubernetes.io/projected/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-kube-api-access-9g9r2\") pod \"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.326451 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-config-volume\") pod \"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.327534 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-config-volume\") pod 
\"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.335200 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-secret-volume\") pod \"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.346409 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g9r2\" (UniqueName: \"kubernetes.io/projected/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-kube-api-access-9g9r2\") pod \"collect-profiles-29406660-bz8wm\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.427557 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 07:00:00 crc kubenswrapper[4943]: E1129 07:00:00.427742 4943 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 07:00:00 crc kubenswrapper[4943]: E1129 07:00:00.428033 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert podName:882924cc-0259-4c98-b40a-1c02eeadaa09 nodeName:}" failed. No retries permitted until 2025-11-29 07:00:08.428014093 +0000 UTC m=+1583.358102846 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" (UID: "882924cc-0259-4c98-b40a-1c02eeadaa09") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.477405 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.630946 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:00:00 crc kubenswrapper[4943]: E1129 07:00:00.631136 4943 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 29 07:00:00 crc kubenswrapper[4943]: E1129 07:00:00.631389 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. 
No retries permitted until 2025-11-29 07:00:08.631372065 +0000 UTC m=+1583.561460818 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "metrics-server-cert" not found Nov 29 07:00:00 crc kubenswrapper[4943]: I1129 07:00:00.631557 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:00:00 crc kubenswrapper[4943]: E1129 07:00:00.631805 4943 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 07:00:00 crc kubenswrapper[4943]: E1129 07:00:00.631902 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 07:00:08.631884838 +0000 UTC m=+1583.561973581 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "webhook-server-cert" not found Nov 29 07:00:07 crc kubenswrapper[4943]: I1129 07:00:07.930015 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 07:00:07 crc kubenswrapper[4943]: I1129 07:00:07.934605 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ec91d76-10f1-458f-b999-6212f13f5e18-cert\") pod \"infra-operator-controller-manager-57548d458d-kg4v7\" (UID: \"0ec91d76-10f1-458f-b999-6212f13f5e18\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 07:00:08 crc kubenswrapper[4943]: I1129 07:00:08.081979 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 07:00:08 crc kubenswrapper[4943]: I1129 07:00:08.437009 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 07:00:08 crc kubenswrapper[4943]: I1129 07:00:08.449357 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/882924cc-0259-4c98-b40a-1c02eeadaa09-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw\" (UID: \"882924cc-0259-4c98-b40a-1c02eeadaa09\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 07:00:08 crc kubenswrapper[4943]: I1129 07:00:08.479348 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 07:00:08 crc kubenswrapper[4943]: I1129 07:00:08.641460 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:00:08 crc kubenswrapper[4943]: I1129 07:00:08.641532 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:00:08 crc kubenswrapper[4943]: E1129 07:00:08.641678 4943 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 29 07:00:08 crc kubenswrapper[4943]: E1129 07:00:08.641822 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 07:00:24.641802471 +0000 UTC m=+1599.571891224 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "metrics-server-cert" not found Nov 29 07:00:08 crc kubenswrapper[4943]: E1129 07:00:08.641743 4943 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 07:00:08 crc kubenswrapper[4943]: E1129 07:00:08.641922 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs podName:e75ee72e-f7fe-45a2-81f1-06ed23649f89 nodeName:}" failed. No retries permitted until 2025-11-29 07:00:24.641901344 +0000 UTC m=+1599.571990097 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs") pod "openstack-operator-controller-manager-cb46f9b59-7249w" (UID: "e75ee72e-f7fe-45a2-81f1-06ed23649f89") : secret "webhook-server-cert" not found Nov 29 07:00:23 crc kubenswrapper[4943]: E1129 07:00:23.630877 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429" Nov 29 07:00:23 crc kubenswrapper[4943]: E1129 07:00:23.632456 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dhtgw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5f64f6f8bb-rp427_openstack-operators(d7ba2d7b-5840-4cc7-95d9-1953d8c0250b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:00:23 crc kubenswrapper[4943]: E1129 07:00:23.986770 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = reading blob sha256:7d6ca59745ac48971cbc2d72b53fe413144fa5c0c21f2ef1d7aaf1291851e501: Get 
\"https://quay.io/v2/openstack-k8s-operators/keystone-operator/blobs/sha256:7d6ca59745ac48971cbc2d72b53fe413144fa5c0c21f2ef1d7aaf1291851e501\": context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:986861e5a0a9954f63581d9d55a30f8057883cefea489415d76257774526eea3" Nov 29 07:00:23 crc kubenswrapper[4943]: E1129 07:00:23.986976 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:986861e5a0a9954f63581d9d55a30f8057883cefea489415d76257774526eea3,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rgz92,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-546d4bdf48-v6d5z_openstack-operators(0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f): ErrImagePull: rpc error: code = Canceled desc = reading blob sha256:7d6ca59745ac48971cbc2d72b53fe413144fa5c0c21f2ef1d7aaf1291851e501: Get \"https://quay.io/v2/openstack-k8s-operators/keystone-operator/blobs/sha256:7d6ca59745ac48971cbc2d72b53fe413144fa5c0c21f2ef1d7aaf1291851e501\": context canceled" logger="UnhandledError" Nov 29 07:00:24 crc kubenswrapper[4943]: I1129 07:00:24.668812 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" 
Nov 29 07:00:24 crc kubenswrapper[4943]: I1129 07:00:24.668884 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:00:24 crc kubenswrapper[4943]: I1129 07:00:24.675394 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-webhook-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:00:24 crc kubenswrapper[4943]: I1129 07:00:24.676160 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e75ee72e-f7fe-45a2-81f1-06ed23649f89-metrics-certs\") pod \"openstack-operator-controller-manager-cb46f9b59-7249w\" (UID: \"e75ee72e-f7fe-45a2-81f1-06ed23649f89\") " pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:00:24 crc kubenswrapper[4943]: I1129 07:00:24.794305 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:00:26 crc kubenswrapper[4943]: E1129 07:00:26.100531 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5" Nov 29 07:00:26 crc kubenswrapper[4943]: E1129 07:00:26.101075 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jcwkk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-68c6d99b8f-6hb4z_openstack-operators(b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:00:32 crc kubenswrapper[4943]: E1129 07:00:32.927226 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f" Nov 29 07:00:32 crc kubenswrapper[4943]: E1129 07:00:32.928088 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cg2t5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-jgxw4_openstack-operators(b60c121d-d0c2-4e2e-be92-6da802d74dd6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:00:37 crc kubenswrapper[4943]: E1129 07:00:37.761234 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = reading blob sha256:3de201b53a8c49a88b3456e8e6942e1258d876892fe9e3a6d3327b40da0cc427: Get \"http://38.102.83.113:5001/v2/openstack-k8s-operators/cinder-operator/blobs/sha256:3de201b53a8c49a88b3456e8e6942e1258d876892fe9e3a6d3327b40da0cc427\": context canceled" image="38.102.83.113:5001/openstack-k8s-operators/cinder-operator:019d68401352315d2d18b500118e2eb31a07fb26" Nov 29 07:00:37 crc kubenswrapper[4943]: E1129 07:00:37.761723 4943 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = reading blob sha256:3de201b53a8c49a88b3456e8e6942e1258d876892fe9e3a6d3327b40da0cc427: Get \"http://38.102.83.113:5001/v2/openstack-k8s-operators/cinder-operator/blobs/sha256:3de201b53a8c49a88b3456e8e6942e1258d876892fe9e3a6d3327b40da0cc427\": context canceled" image="38.102.83.113:5001/openstack-k8s-operators/cinder-operator:019d68401352315d2d18b500118e2eb31a07fb26" Nov 29 07:00:37 crc kubenswrapper[4943]: E1129 07:00:37.761861 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.113:5001/openstack-k8s-operators/cinder-operator:019d68401352315d2d18b500118e2eb31a07fb26,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bt52k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-546795bfb5-9dvgz_openstack-operators(5cfbb650-258a-4cf5-8ada-c721fa5aee9a): ErrImagePull: rpc error: code = Canceled desc = reading blob sha256:3de201b53a8c49a88b3456e8e6942e1258d876892fe9e3a6d3327b40da0cc427: Get \"http://38.102.83.113:5001/v2/openstack-k8s-operators/cinder-operator/blobs/sha256:3de201b53a8c49a88b3456e8e6942e1258d876892fe9e3a6d3327b40da0cc427\": context canceled" logger="UnhandledError" Nov 29 07:00:38 crc kubenswrapper[4943]: E1129 07:00:38.237878 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:9f68d7bc8c6bce38f46dee8a8272d5365c49fe7b32b2af52e8ac884e212f3a85" Nov 29 07:00:38 crc kubenswrapper[4943]: E1129 07:00:38.238287 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:9f68d7bc8c6bce38f46dee8a8272d5365c49fe7b32b2af52e8ac884e212f3a85,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xfxrc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-78b4bc895b-ncg7s_openstack-operators(5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:00:43 crc kubenswrapper[4943]: E1129 07:00:43.229816 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:0f523b7e2fa9e86fef986acf07d0c42d5658c475d565f11eaea926ebffcb6530" Nov 29 07:00:43 crc kubenswrapper[4943]: E1129 07:00:43.230602 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:0f523b7e2fa9e86fef986acf07d0c42d5658c475d565f11eaea926ebffcb6530,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-twjl4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-6c548fd776-zqc94_openstack-operators(ac03d0ce-8e35-479d-9f8d-06e05955d2ce): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:00:45 crc kubenswrapper[4943]: E1129 07:00:45.021842 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:440cde33d3a2a0c545cd1c110a3634eb85544370f448865b97a13c38034b0172" Nov 29 07:00:45 crc kubenswrapper[4943]: E1129 07:00:45.022174 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:440cde33d3a2a0c545cd1c110a3634eb85544370f448865b97a13c38034b0172,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2nrqq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-668d9c48b9-knfbt_openstack-operators(868d0e27-4b0a-4cb0-a2a6-13d58e257c8f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:00:45 crc kubenswrapper[4943]: E1129 07:00:45.235308 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670" Nov 29 07:00:45 crc kubenswrapper[4943]: E1129 07:00:45.235490 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-49r79,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-hrvxb_openstack-operators(f98a9e94-a4bf-4980-9ab0-efd202b4ee30): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:00:48 crc kubenswrapper[4943]: E1129 07:00:48.455769 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59" Nov 29 07:00:48 crc kubenswrapper[4943]: E1129 07:00:48.456446 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l8fdz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-bwq9n_openstack-operators(d4923779-fef3-444b-9276-5ca610c71fd4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:01:02 crc kubenswrapper[4943]: I1129 07:01:02.613856 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:01:02 crc kubenswrapper[4943]: I1129 07:01:02.614459 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:01:18 crc kubenswrapper[4943]: E1129 07:01:18.796135 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d" Nov 29 07:01:18 crc kubenswrapper[4943]: E1129 07:01:18.797146 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-npmxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-vbk9x_openstack-operators(c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:01:32 crc kubenswrapper[4943]: I1129 07:01:32.613023 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:01:32 crc kubenswrapper[4943]: I1129 07:01:32.613478 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:01:42 crc kubenswrapper[4943]: E1129 07:01:42.128772 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:f6059a0fbf031d34dcf086d14ce8c0546caeaee23c5780e90b5037c5feee9fea" Nov 29 07:01:42 crc kubenswrapper[4943]: E1129 07:01:42.129348 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:f6059a0fbf031d34dcf086d14ce8c0546caeaee23c5780e90b5037c5feee9fea,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xqhnw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7d9dfd778-94jbz_openstack-operators(c9815594-e69f-411a-9bf1-b0c064eb5180): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:01:42 crc kubenswrapper[4943]: I1129 07:01:42.132474 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:01:50 crc kubenswrapper[4943]: E1129 07:01:50.481820 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385" Nov 29 07:01:50 crc kubenswrapper[4943]: E1129 07:01:50.484115 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cmsbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-ll829_openstack-operators(66d0d275-3a0b-45a1-8b57-dc7ec4559888): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:01:51 crc kubenswrapper[4943]: E1129 07:01:51.326968 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621" Nov 29 07:01:51 crc kubenswrapper[4943]: E1129 07:01:51.327180 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9pthf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-f4nk6_openstack-operators(4602188b-0cc4-4b1e-80e8-a2f40fd43da0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:01:51 crc kubenswrapper[4943]: E1129 07:01:51.514745 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 29 07:01:51 crc kubenswrapper[4943]: E1129 07:01:51.515159 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jcwkk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-68c6d99b8f-6hb4z_openstack-operators(b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 29 07:01:51 crc kubenswrapper[4943]: E1129 07:01:51.516408 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" 
podUID="b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7" Nov 29 07:01:51 crc kubenswrapper[4943]: E1129 07:01:51.876355 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 29 07:01:51 crc kubenswrapper[4943]: E1129 07:01:51.876699 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bbxtf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-fvhlh_openstack-operators(83a6e478-8cc1-4061-b06f-e0b9faf51ede): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:01:51 crc kubenswrapper[4943]: E1129 07:01:51.877994 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" podUID="83a6e478-8cc1-4061-b06f-e0b9faf51ede" Nov 29 07:01:54 crc kubenswrapper[4943]: E1129 07:01:54.902133 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 29 07:01:54 crc kubenswrapper[4943]: E1129 07:01:54.902348 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ 
--logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cg2t5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-jgxw4_openstack-operators(b60c121d-d0c2-4e2e-be92-6da802d74dd6): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 29 07:01:54 crc kubenswrapper[4943]: E1129 07:01:54.903513 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" podUID="b60c121d-d0c2-4e2e-be92-6da802d74dd6" Nov 29 07:01:54 crc kubenswrapper[4943]: E1129 07:01:54.981679 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7" Nov 29 07:01:54 crc kubenswrapper[4943]: E1129 07:01:54.982223 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fzkx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-56bbcc9d85-tg7s9_openstack-operators(b979c1f8-20ad-4694-98e5-674738e37f4c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:01:55 crc kubenswrapper[4943]: E1129 07:01:55.476139 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 29 07:01:55 crc kubenswrapper[4943]: E1129 07:01:55.476686 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bt52k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
cinder-operator-controller-manager-546795bfb5-9dvgz_openstack-operators(5cfbb650-258a-4cf5-8ada-c721fa5aee9a): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:01:55 crc kubenswrapper[4943]: E1129 07:01:55.478416 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = reading blob sha256:3de201b53a8c49a88b3456e8e6942e1258d876892fe9e3a6d3327b40da0cc427: Get \\\"http://38.102.83.113:5001/v2/openstack-k8s-operators/cinder-operator/blobs/sha256:3de201b53a8c49a88b3456e8e6942e1258d876892fe9e3a6d3327b40da0cc427\\\": context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" podUID="5cfbb650-258a-4cf5-8ada-c721fa5aee9a"
Nov 29 07:01:55 crc kubenswrapper[4943]: E1129 07:01:55.492672 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557"
Nov 29 07:01:55 crc kubenswrapper[4943]: E1129 07:01:55.492876 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gqxtx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-jq6kn_openstack-operators(74ea0cf0-4b53-4342-9ea4-c2e4db748104): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:01:56 crc kubenswrapper[4943]: E1129 07:01:56.230261 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:ecf7be921850bdc04697ed1b332bab39ad2a64e4e45c2a445c04f9bae6ac61b5"
Nov 29 07:01:56 crc kubenswrapper[4943]: E1129 07:01:56.230502 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:ecf7be921850bdc04697ed1b332bab39ad2a64e4e45c2a445c04f9bae6ac61b5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q66x7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-6546668bfd-wchm4_openstack-operators(c35fc2d0-5f79-4edf-86ec-d6f1add18551): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:01:56 crc kubenswrapper[4943]: E1129 07:01:56.993282 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168"
Nov 29 07:01:56 crc kubenswrapper[4943]: E1129 07:01:56.993489 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vzps,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-v2kbm_openstack-operators(980cdf9e-3763-4aca-a92a-1f4ca61c1501): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.116661 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.117130 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-twjl4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-6c548fd776-zqc94_openstack-operators(ac03d0ce-8e35-479d-9f8d-06e05955d2ce): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.118352 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" podUID="ac03d0ce-8e35-479d-9f8d-06e05955d2ce"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.122914 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.123049 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-49r79,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-hrvxb_openstack-operators(f98a9e94-a4bf-4980-9ab0-efd202b4ee30): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.123234 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.123291 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2nrqq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-668d9c48b9-knfbt_openstack-operators(868d0e27-4b0a-4cb0-a2a6-13d58e257c8f): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.124471 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" podUID="868d0e27-4b0a-4cb0-a2a6-13d58e257c8f"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.124524 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" podUID="f98a9e94-a4bf-4980-9ab0-efd202b4ee30"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.142335 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.142474 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l8fdz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-bwq9n_openstack-operators(d4923779-fef3-444b-9276-5ca610c71fd4): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:01:58 crc kubenswrapper[4943]: E1129 07:01:58.143684 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" podUID="d4923779-fef3-444b-9276-5ca610c71fd4"
Nov 29 07:01:58 crc kubenswrapper[4943]: I1129 07:01:58.359134 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm"]
Nov 29 07:02:02 crc kubenswrapper[4943]: I1129 07:02:02.613430 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 07:02:02 crc kubenswrapper[4943]: I1129 07:02:02.613768 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 07:02:02 crc kubenswrapper[4943]: I1129 07:02:02.613818 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7"
Nov 29 07:02:02 crc kubenswrapper[4943]: I1129 07:02:02.614363 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 07:02:02 crc kubenswrapper[4943]: I1129 07:02:02.614414 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" gracePeriod=600
Nov 29 07:02:05 crc kubenswrapper[4943]: E1129 07:02:05.337711 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" podUID="83a6e478-8cc1-4061-b06f-e0b9faf51ede"
Nov 29 07:02:06 crc kubenswrapper[4943]: E1129 07:02:06.928325 4943 log.go:32] "ListImages with filter from image service failed" err="rpc error: code = DeadlineExceeded desc = context deadline exceeded" filter="nil"
Nov 29 07:02:06 crc kubenswrapper[4943]: E1129 07:02:06.928413 4943 kuberuntime_image.go:117] "Failed to list images" err="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
Nov 29 07:02:06 crc kubenswrapper[4943]: I1129 07:02:06.928427 4943 image_gc_manager.go:222] "Failed to update image list" err="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
Nov 29 07:02:09 crc kubenswrapper[4943]: E1129 07:02:09.661800 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94"
Nov 29 07:02:09 crc kubenswrapper[4943]: E1129 07:02:09.662503 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tghcn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-jsgws_openstack-operators(7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:02:10 crc kubenswrapper[4943]: I1129 07:02:10.009948 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" exitCode=0
Nov 29 07:02:10 crc kubenswrapper[4943]: I1129 07:02:10.009991 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"}
Nov 29 07:02:10 crc kubenswrapper[4943]: I1129 07:02:10.010033 4943 scope.go:117] "RemoveContainer" containerID="6bdc0d48c9b1a39aaaa8872104ad9af5eda5e1f91200620e07605efda9f99245"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.005160 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4pllj"]
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.008207 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.014386 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4pllj"]
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.103548 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-catalog-content\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.103665 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk9mm\" (UniqueName: \"kubernetes.io/projected/b42c5c6f-e81f-42a4-b303-686a695ab49c-kube-api-access-wk9mm\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.103699 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-utilities\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.205000 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-catalog-content\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.205079 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk9mm\" (UniqueName: \"kubernetes.io/projected/b42c5c6f-e81f-42a4-b303-686a695ab49c-kube-api-access-wk9mm\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.205102 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-utilities\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.205652 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-utilities\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.206067 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-catalog-content\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.230092 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk9mm\" (UniqueName: \"kubernetes.io/projected/b42c5c6f-e81f-42a4-b303-686a695ab49c-kube-api-access-wk9mm\") pod \"redhat-marketplace-4pllj\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: I1129 07:02:11.324546 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4pllj"
Nov 29 07:02:11 crc kubenswrapper[4943]: E1129 07:02:11.851590 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:02:11 crc kubenswrapper[4943]: E1129 07:02:11.852198 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xfxrc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-78b4bc895b-ncg7s_openstack-operators(5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:02:11 crc kubenswrapper[4943]: E1129 07:02:11.853662 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" podUID="5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55"
Nov 29 07:02:11 crc kubenswrapper[4943]: W1129 07:02:11.872911 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8324b1a2_5aa0_4ed0_b6d2_91fad49082a7.slice/crio-4da7cae147f41a598615c4c0914be4c9fac3e75a797598dc0d3007846baf92b5 WatchSource:0}: Error finding container 4da7cae147f41a598615c4c0914be4c9fac3e75a797598dc0d3007846baf92b5: Status 404 returned error can't find the container with id 4da7cae147f41a598615c4c0914be4c9fac3e75a797598dc0d3007846baf92b5
Nov 29 07:02:12 crc kubenswrapper[4943]: I1129 07:02:12.040650 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" event={"ID":"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7","Type":"ContainerStarted","Data":"4da7cae147f41a598615c4c0914be4c9fac3e75a797598dc0d3007846baf92b5"}
Nov 29 07:02:12 crc kubenswrapper[4943]: I1129 07:02:12.290160 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw"]
Nov 29 07:02:12 crc kubenswrapper[4943]: I1129 07:02:12.340580 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7"]
Nov 29 07:02:12 crc kubenswrapper[4943]: E1129 07:02:12.654179 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:02:12 crc kubenswrapper[4943]: I1129 07:02:12.852546 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w"]
Nov 29 07:02:13 crc kubenswrapper[4943]: I1129 07:02:13.056521 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" event={"ID":"882924cc-0259-4c98-b40a-1c02eeadaa09","Type":"ContainerStarted","Data":"f74eb02e43c8105a2aa2f26551beb0f7bf9f70145e6bb83fe34858d870dbe0ba"}
Nov 29 07:02:13 crc kubenswrapper[4943]: I1129 07:02:13.056976 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"
Nov 29 07:02:13 crc kubenswrapper[4943]: E1129 07:02:13.057212 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.391328 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8m6j4"]
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.393810 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.405711 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8m6j4"]
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.457268 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-catalog-content\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.457359 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-utilities\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.457401 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7nbk\" (UniqueName: \"kubernetes.io/projected/8a2fe372-bfe7-463e-9a75-1a04648b3da0-kube-api-access-n7nbk\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.559045 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-catalog-content\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.559106 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-utilities\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.559162 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7nbk\" (UniqueName: \"kubernetes.io/projected/8a2fe372-bfe7-463e-9a75-1a04648b3da0-kube-api-access-n7nbk\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.560249 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-catalog-content\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.560279 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-utilities\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.583406 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7nbk\" (UniqueName: \"kubernetes.io/projected/8a2fe372-bfe7-463e-9a75-1a04648b3da0-kube-api-access-n7nbk\") pod \"community-operators-8m6j4\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:14 crc kubenswrapper[4943]: I1129 07:02:14.723194 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8m6j4"
Nov 29 07:02:19 crc kubenswrapper[4943]: W1129 07:02:19.111506 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ec91d76_10f1_458f_b999_6212f13f5e18.slice/crio-6f920c7193e5c83de6c41f5c8aa0ddceb4e38616607644e7d7d094ce14f9b027 WatchSource:0}: Error finding container 6f920c7193e5c83de6c41f5c8aa0ddceb4e38616607644e7d7d094ce14f9b027: Status 404 returned error can't find the container with id 6f920c7193e5c83de6c41f5c8aa0ddceb4e38616607644e7d7d094ce14f9b027
Nov 29 07:02:20 crc kubenswrapper[4943]: I1129 07:02:20.102991 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" event={"ID":"0ec91d76-10f1-458f-b999-6212f13f5e18","Type":"ContainerStarted","Data":"6f920c7193e5c83de6c41f5c8aa0ddceb4e38616607644e7d7d094ce14f9b027"}
Nov 29 07:02:20 crc kubenswrapper[4943]: W1129 07:02:20.200044 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode75ee72e_f7fe_45a2_81f1_06ed23649f89.slice/crio-6acfefc5ad1f3cd024a246421a3ed80e8ff4570dced6d7d2de8ece28e793bcb5 WatchSource:0}: Error finding container 6acfefc5ad1f3cd024a246421a3ed80e8ff4570dced6d7d2de8ece28e793bcb5: Status 404 returned error can't find the container with id 6acfefc5ad1f3cd024a246421a3ed80e8ff4570dced6d7d2de8ece28e793bcb5
Nov 29 07:02:21 crc kubenswrapper[4943]: E1129 07:02:21.060445 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:02:21 crc kubenswrapper[4943]: E1129 07:02:21.060689 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fzkx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-56bbcc9d85-tg7s9_openstack-operators(b979c1f8-20ad-4694-98e5-674738e37f4c): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:02:21 crc kubenswrapper[4943]: E1129 07:02:21.064203 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" podUID="b979c1f8-20ad-4694-98e5-674738e37f4c"
Nov 29 07:02:21 crc kubenswrapper[4943]: I1129 07:02:21.112995 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" event={"ID":"e75ee72e-f7fe-45a2-81f1-06ed23649f89","Type":"ContainerStarted","Data":"6acfefc5ad1f3cd024a246421a3ed80e8ff4570dced6d7d2de8ece28e793bcb5"}
Nov 29 07:02:24 crc kubenswrapper[4943]: E1129 07:02:24.693151 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:0f523b7e2fa9e86fef986acf07d0c42d5658c475d565f11eaea926ebffcb6530"
Nov 29 07:02:24 crc kubenswrapper[4943]: E1129 07:02:24.693199 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59"
Nov 29 07:02:24 crc kubenswrapper[4943]: E1129 07:02:24.693761 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:0f523b7e2fa9e86fef986acf07d0c42d5658c475d565f11eaea926ebffcb6530,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-twjl4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-6c548fd776-zqc94_openstack-operators(ac03d0ce-8e35-479d-9f8d-06e05955d2ce): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:02:24 crc kubenswrapper[4943]: E1129 07:02:24.693761 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l8fdz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-bwq9n_openstack-operators(d4923779-fef3-444b-9276-5ca610c71fd4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:02:28 crc kubenswrapper[4943]: I1129 07:02:28.327647 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"
Nov 29 07:02:28 crc kubenswrapper[4943]: E1129 07:02:28.328304 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:02:29 crc kubenswrapper[4943]: E1129 07:02:29.699448 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:02:29 crc kubenswrapper[4943]: E1129 07:02:29.699946 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cmsbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-ll829_openstack-operators(66d0d275-3a0b-45a1-8b57-dc7ec4559888): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:02:29 crc kubenswrapper[4943]: E1129 07:02:29.701267 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" podUID="66d0d275-3a0b-45a1-8b57-dc7ec4559888"
Nov 29 07:02:43 crc kubenswrapper[4943]: I1129 07:02:43.328930 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"
Nov 29 07:02:43 crc kubenswrapper[4943]: E1129 07:02:43.329801 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:02:44 crc kubenswrapper[4943]: E1129 07:02:44.795437 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:02:44 crc kubenswrapper[4943]: E1129 07:02:44.795582 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gqxtx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-jq6kn_openstack-operators(74ea0cf0-4b53-4342-9ea4-c2e4db748104): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:02:44 crc kubenswrapper[4943]: E1129 07:02:44.796712 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" podUID="74ea0cf0-4b53-4342-9ea4-c2e4db748104"
Nov 29 07:02:48 crc kubenswrapper[4943]: E1129 07:02:48.729711 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:02:48 crc kubenswrapper[4943]: E1129 07:02:48.730213 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q66x7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-6546668bfd-wchm4_openstack-operators(c35fc2d0-5f79-4edf-86ec-d6f1add18551): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:02:48 crc kubenswrapper[4943]: E1129 07:02:48.730450 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:02:48 crc kubenswrapper[4943]: E1129 07:02:48.730555 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vzps,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-v2kbm_openstack-operators(980cdf9e-3763-4aca-a92a-1f4ca61c1501): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:02:48 crc kubenswrapper[4943]: E1129 07:02:48.732248 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" podUID="980cdf9e-3763-4aca-a92a-1f4ca61c1501"
Nov 29 07:02:48 crc kubenswrapper[4943]: E1129 07:02:48.732277 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" podUID="c35fc2d0-5f79-4edf-86ec-d6f1add18551"
Nov 29 07:02:51 crc kubenswrapper[4943]: E1129 07:02:51.390934 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:02:51 crc kubenswrapper[4943]: E1129 07:02:51.391300 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9pthf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-f4nk6_openstack-operators(4602188b-0cc4-4b1e-80e8-a2f40fd43da0): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:02:51 crc kubenswrapper[4943]: E1129 07:02:51.392518 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" podUID="4602188b-0cc4-4b1e-80e8-a2f40fd43da0"
Nov 29 07:02:52 crc kubenswrapper[4943]: E1129 07:02:52.219208 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0"
Nov 29 07:02:52 crc kubenswrapper[4943]: E1129 07:02:52.219455 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-npmxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-vbk9x_openstack-operators(c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Nov 29 07:02:52 crc kubenswrapper[4943]: E1129 07:02:52.220779 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" podUID="c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0"
Nov 29 07:02:54 crc kubenswrapper[4943]: I1129 07:02:54.252170 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8m6j4"]
Nov 29 07:02:54 crc kubenswrapper[4943]: I1129 07:02:54.896396 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4pllj"]
Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.185376 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:09a6d0613ee2d3c1c809fc36c22678458ac271e0da87c970aec0a5339f5423f7"
Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.185585 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:09a6d0613ee2d3c1c809fc36c22678458ac271e0da87c970aec0a5339f5423f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pvp2f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-57548d458d-kg4v7_openstack-operators(0ec91d76-10f1-458f-b999-6212f13f5e18): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.739546 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:14cfad6ea2e7f7ecc4cb2aafceb9c61514b3d04b66668832d1e4ac3b19f1ab81" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.740975 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:14cfad6ea2e7f7ecc4cb2aafceb9c61514b3d04b66668832d1e4ac3b19f1ab81,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOM
ETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name
:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA
_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE
_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wf87c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw_openstack-operators(882924cc-0259-4c98-b40a-1c02eeadaa09): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.792178 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.792495 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rgz92,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-546d4bdf48-v6d5z_openstack-operators(0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.793800 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = reading blob sha256:7d6ca59745ac48971cbc2d72b53fe413144fa5c0c21f2ef1d7aaf1291851e501: Get \\\"https://quay.io/v2/openstack-k8s-operators/keystone-operator/blobs/sha256:7d6ca59745ac48971cbc2d72b53fe413144fa5c0c21f2ef1d7aaf1291851e501\\\": context canceled\", failed 
to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" podUID="0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.831132 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.831315 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dhtgw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5f64f6f8bb-rp427_openstack-operators(d7ba2d7b-5840-4cc7-95d9-1953d8c0250b): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.832605 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" podUID="d7ba2d7b-5840-4cc7-95d9-1953d8c0250b" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.860829 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.860990 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tghcn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-jsgws_openstack-operators(7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.864362 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" podUID="7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3" Nov 29 07:02:56 crc kubenswrapper[4943]: W1129 07:02:56.877019 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb42c5c6f_e81f_42a4_b303_686a695ab49c.slice/crio-c3243940c1af51f274db0d61ec103b0d534c533194a87cff6390ac0a115d5468 WatchSource:0}: Error finding container c3243940c1af51f274db0d61ec103b0d534c533194a87cff6390ac0a115d5468: Status 404 returned error can't find the container with id c3243940c1af51f274db0d61ec103b0d534c533194a87cff6390ac0a115d5468 Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.902558 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.902791 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xqhnw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7d9dfd778-94jbz_openstack-operators(c9815594-e69f-411a-9bf1-b0c064eb5180): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 29 07:02:56 crc kubenswrapper[4943]: E1129 07:02:56.904105 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" podUID="c9815594-e69f-411a-9bf1-b0c064eb5180" Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.381459 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" event={"ID":"b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7","Type":"ContainerStarted","Data":"cee9d1b7501dc42ecc0ab450108e82c9abc2390e09314fc6c783f4fb6396989f"} Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.395031 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" event={"ID":"e75ee72e-f7fe-45a2-81f1-06ed23649f89","Type":"ContainerStarted","Data":"c688441e0803b08b82ee9a014fb3b836c98b053939b4e2cd1eb9ad43a4d5b93b"} Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.395620 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.415888 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" event={"ID":"5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55","Type":"ContainerStarted","Data":"4e67c9f99e1c36c8a611b455ff13cccba3878f9ea20979cd26f731e7026597ac"} Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.419454 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8m6j4" event={"ID":"8a2fe372-bfe7-463e-9a75-1a04648b3da0","Type":"ContainerStarted","Data":"e448be6dcc273fa0ddbb00befcd888dfa8946ff13217e9a1bfc97b63a132e3fd"} Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.422808 4943 generic.go:334] "Generic (PLEG): container finished" podID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerID="8626f0dfa939862d112566f5f942c4bce9d25410da9aa43a1ae5fee7920973fa" exitCode=0 Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.422883 4943 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4pllj" event={"ID":"b42c5c6f-e81f-42a4-b303-686a695ab49c","Type":"ContainerDied","Data":"8626f0dfa939862d112566f5f942c4bce9d25410da9aa43a1ae5fee7920973fa"} Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.422907 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4pllj" event={"ID":"b42c5c6f-e81f-42a4-b303-686a695ab49c","Type":"ContainerStarted","Data":"c3243940c1af51f274db0d61ec103b0d534c533194a87cff6390ac0a115d5468"} Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.450606 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" podStartSLOduration=185.450577301 podStartE2EDuration="3m5.450577301s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:02:57.434652709 +0000 UTC m=+1752.364741482" watchObservedRunningTime="2025-11-29 07:02:57.450577301 +0000 UTC m=+1752.380666054" Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.459201 4943 generic.go:334] "Generic (PLEG): container finished" podID="8324b1a2-5aa0-4ed0-b6d2-91fad49082a7" containerID="c016d22ad0c11f6bdc334ef85079d2713d73350933b18454e601dd1c666496d3" exitCode=0 Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.459323 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" event={"ID":"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7","Type":"ContainerDied","Data":"c016d22ad0c11f6bdc334ef85079d2713d73350933b18454e601dd1c666496d3"} Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.472453 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" event={"ID":"f98a9e94-a4bf-4980-9ab0-efd202b4ee30","Type":"ContainerStarted","Data":"9085cb0b9238fcff1cc1d1d37989a798c6f918c1f70e0a8a097924a8efd760ab"} Nov 29 07:02:57 crc kubenswrapper[4943]: I1129 07:02:57.482522 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" event={"ID":"b60c121d-d0c2-4e2e-be92-6da802d74dd6","Type":"ContainerStarted","Data":"91a466170c8cf6e8d57a727e159c44386a375fc76031cbbb198b2576437ca197"} Nov 29 07:02:57 crc kubenswrapper[4943]: E1129 07:02:57.502190 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb42c5c6f_e81f_42a4_b303_686a695ab49c.slice/crio-8626f0dfa939862d112566f5f942c4bce9d25410da9aa43a1ae5fee7920973fa.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb42c5c6f_e81f_42a4_b303_686a695ab49c.slice/crio-conmon-8626f0dfa939862d112566f5f942c4bce9d25410da9aa43a1ae5fee7920973fa.scope\": RecentStats: unable to find data in memory cache]" Nov 29 07:02:58 crc kubenswrapper[4943]: I1129 07:02:58.327717 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:02:58 crc kubenswrapper[4943]: E1129 07:02:58.328312 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:02:58 crc kubenswrapper[4943]: I1129 07:02:58.492974 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8m6j4" event={"ID":"8a2fe372-bfe7-463e-9a75-1a04648b3da0","Type":"ContainerStarted","Data":"63954b7d3f211f2a744658394a7079cddebf8de110740320373a2c8bab62d7fd"} Nov 29 07:02:58 crc kubenswrapper[4943]: I1129 07:02:58.507940 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" event={"ID":"83a6e478-8cc1-4061-b06f-e0b9faf51ede","Type":"ContainerStarted","Data":"61c3de904341e7d7344346f7b783cabad4121e77bd857a9cf69e819ca1a4209b"} Nov 29 07:02:58 crc kubenswrapper[4943]: I1129 07:02:58.511462 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" event={"ID":"b979c1f8-20ad-4694-98e5-674738e37f4c","Type":"ContainerStarted","Data":"0c040ebd5e29b5d2e6df6eee7f1850919e0d296e6ea572a42a592938c7be9e45"} Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.521419 4943 generic.go:334] "Generic (PLEG): container finished" podID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerID="63954b7d3f211f2a744658394a7079cddebf8de110740320373a2c8bab62d7fd" exitCode=0 Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.521511 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8m6j4" event={"ID":"8a2fe372-bfe7-463e-9a75-1a04648b3da0","Type":"ContainerDied","Data":"63954b7d3f211f2a744658394a7079cddebf8de110740320373a2c8bab62d7fd"} Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.524811 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.525161 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" event={"ID":"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7","Type":"ContainerDied","Data":"4da7cae147f41a598615c4c0914be4c9fac3e75a797598dc0d3007846baf92b5"} Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.525207 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4da7cae147f41a598615c4c0914be4c9fac3e75a797598dc0d3007846baf92b5" Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.662028 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-config-volume\") pod \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.662104 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-secret-volume\") pod \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.662212 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g9r2\" (UniqueName: \"kubernetes.io/projected/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-kube-api-access-9g9r2\") pod \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\" (UID: \"8324b1a2-5aa0-4ed0-b6d2-91fad49082a7\") " Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.662977 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-config-volume" (OuterVolumeSpecName: "config-volume") pod "8324b1a2-5aa0-4ed0-b6d2-91fad49082a7" (UID: "8324b1a2-5aa0-4ed0-b6d2-91fad49082a7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.675836 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8324b1a2-5aa0-4ed0-b6d2-91fad49082a7" (UID: "8324b1a2-5aa0-4ed0-b6d2-91fad49082a7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.676091 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-kube-api-access-9g9r2" (OuterVolumeSpecName: "kube-api-access-9g9r2") pod "8324b1a2-5aa0-4ed0-b6d2-91fad49082a7" (UID: "8324b1a2-5aa0-4ed0-b6d2-91fad49082a7"). InnerVolumeSpecName "kube-api-access-9g9r2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.763485 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.763542 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 07:02:59 crc kubenswrapper[4943]: I1129 07:02:59.763553 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9g9r2\" (UniqueName: \"kubernetes.io/projected/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7-kube-api-access-9g9r2\") on node \"crc\" DevicePath \"\"" Nov 29 07:03:00 crc kubenswrapper[4943]: I1129 07:03:00.577512 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvhlh" podStartSLOduration=5.897313352 podStartE2EDuration="3m8.577489052s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:54.180105535 +0000 UTC m=+1569.110194288" lastFinishedPulling="2025-11-29 07:02:56.860281235 +0000 UTC m=+1751.790369988" observedRunningTime="2025-11-29 07:02:59.579878854 +0000 UTC m=+1754.509967617" watchObservedRunningTime="2025-11-29 07:03:00.577489052 +0000 UTC m=+1755.507577805" Nov 29 07:03:01 crc kubenswrapper[4943]: I1129 07:03:01.247405 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" event={"ID":"5cfbb650-258a-4cf5-8ada-c721fa5aee9a","Type":"ContainerStarted","Data":"f04a370329e374e80ebd40a6a36bbaf7dae27d49d52855c3598a2167ab44da0e"} Nov 29 07:03:01 crc kubenswrapper[4943]: I1129 07:03:01.249271 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" event={"ID":"868d0e27-4b0a-4cb0-a2a6-13d58e257c8f","Type":"ContainerStarted","Data":"bedcc1efc1c5cfac573d1991a73165d139c7d7b4ea8b5a7a2f73a94dd8d7b13a"} Nov 29 07:03:01 crc kubenswrapper[4943]: I1129 07:03:01.251070 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm" Nov 29 07:03:01 crc kubenswrapper[4943]: I1129 07:03:01.261377 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" event={"ID":"74ea0cf0-4b53-4342-9ea4-c2e4db748104","Type":"ContainerStarted","Data":"c004fb2fe643af3a144b744fac820f59793167b9cc05487a84a80528c96401e4"} Nov 29 07:03:03 crc kubenswrapper[4943]: I1129 07:03:03.268860 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" event={"ID":"66d0d275-3a0b-45a1-8b57-dc7ec4559888","Type":"ContainerStarted","Data":"284bf12f2af5625e0b1648a76281d52708c41d7e42559e0de20ae3f19362eaba"} Nov 29 07:03:04 crc kubenswrapper[4943]: I1129 07:03:04.278334 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" event={"ID":"c35fc2d0-5f79-4edf-86ec-d6f1add18551","Type":"ContainerStarted","Data":"825728de8e4e96f4da50d9d1e11a6fe58e2e4ca10fbed60a8eb1929146632ebe"} Nov 29 07:03:04 crc kubenswrapper[4943]: I1129 07:03:04.801654 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-cb46f9b59-7249w" Nov 29 07:03:07 crc kubenswrapper[4943]: I1129 07:03:07.299844 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" event={"ID":"d7ba2d7b-5840-4cc7-95d9-1953d8c0250b","Type":"ContainerStarted","Data":"969b3d541717a6da529500428ea038530b18aa724587fdb6eb4ded9696da626d"} Nov 29 07:03:10 crc kubenswrapper[4943]: I1129 07:03:10.320125 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" event={"ID":"c9815594-e69f-411a-9bf1-b0c064eb5180","Type":"ContainerStarted","Data":"5652160e86db8b100d38364e6a22e2afd99f032e2e828b69d1ab810a5afec2f1"} Nov 29 07:03:12 crc kubenswrapper[4943]: I1129 07:03:12.327323 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:03:12 crc kubenswrapper[4943]: E1129 07:03:12.327927 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.339493 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" event={"ID":"7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3","Type":"ContainerStarted","Data":"5de1ef839d3982dba7ecb7990391069d24137b368e04cc770766e4252849eb66"} Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.342593 4943 generic.go:334] "Generic (PLEG): container finished" podID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerID="011ed3c51b8af0405ee8eccbce935e705aa2431acbef0c319513ec42776a76dc" exitCode=0 Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.342623 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4pllj" 
event={"ID":"b42c5c6f-e81f-42a4-b303-686a695ab49c","Type":"ContainerDied","Data":"011ed3c51b8af0405ee8eccbce935e705aa2431acbef0c319513ec42776a76dc"} Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.671465 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c5mpx"] Nov 29 07:03:13 crc kubenswrapper[4943]: E1129 07:03:13.672144 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8324b1a2-5aa0-4ed0-b6d2-91fad49082a7" containerName="collect-profiles" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.672241 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8324b1a2-5aa0-4ed0-b6d2-91fad49082a7" containerName="collect-profiles" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.672425 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="8324b1a2-5aa0-4ed0-b6d2-91fad49082a7" containerName="collect-profiles" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.673580 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.688844 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c5mpx"] Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.783782 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq2tn\" (UniqueName: \"kubernetes.io/projected/d11578d8-5840-4b86-914c-673da047462b-kube-api-access-gq2tn\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.783843 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-utilities\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.783883 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-catalog-content\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.885319 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq2tn\" (UniqueName: \"kubernetes.io/projected/d11578d8-5840-4b86-914c-673da047462b-kube-api-access-gq2tn\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.885751 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-utilities\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.886313 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-utilities\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.886365 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-catalog-content\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.886610 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-catalog-content\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.920110 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq2tn\" (UniqueName: \"kubernetes.io/projected/d11578d8-5840-4b86-914c-673da047462b-kube-api-access-gq2tn\") pod \"certified-operators-c5mpx\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:13 crc kubenswrapper[4943]: I1129 07:03:13.991939 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:03:14 crc kubenswrapper[4943]: I1129 07:03:14.716454 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c5mpx"] Nov 29 07:03:14 crc kubenswrapper[4943]: W1129 07:03:14.748029 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd11578d8_5840_4b86_914c_673da047462b.slice/crio-6704ca5a6ea9d8f010b34ad1b454b5327e5d51d99ba688cb6a83eccfaaa658dc WatchSource:0}: Error finding container 6704ca5a6ea9d8f010b34ad1b454b5327e5d51d99ba688cb6a83eccfaaa658dc: Status 404 returned error can't find the container with id 6704ca5a6ea9d8f010b34ad1b454b5327e5d51d99ba688cb6a83eccfaaa658dc Nov 29 07:03:15 crc kubenswrapper[4943]: I1129 07:03:15.369717 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" event={"ID":"d4923779-fef3-444b-9276-5ca610c71fd4","Type":"ContainerStarted","Data":"5c3acbe538995d9362de2fcafd1427cebfe82d91d88fbeef90013b8f541909ba"} Nov 29 07:03:15 crc kubenswrapper[4943]: I1129 07:03:15.370919 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" event={"ID":"980cdf9e-3763-4aca-a92a-1f4ca61c1501","Type":"ContainerStarted","Data":"b514927bf400ce13e7f3b810df5068bfbaff62b67f2a9d45e07b58f6f7d9167e"} Nov 29 07:03:15 crc kubenswrapper[4943]: I1129 07:03:15.371799 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5mpx" event={"ID":"d11578d8-5840-4b86-914c-673da047462b","Type":"ContainerStarted","Data":"6704ca5a6ea9d8f010b34ad1b454b5327e5d51d99ba688cb6a83eccfaaa658dc"} Nov 29 07:03:15 crc kubenswrapper[4943]: I1129 07:03:15.375136 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8m6j4" 
event={"ID":"8a2fe372-bfe7-463e-9a75-1a04648b3da0","Type":"ContainerStarted","Data":"f7e8783699bf8d37d3f22e38f99332000863f385e012ec802254f18059530c6b"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.401289 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" event={"ID":"5cfbb650-258a-4cf5-8ada-c721fa5aee9a","Type":"ContainerStarted","Data":"76c677cf2ebdfc0c1a82aadec5df9c2aec379d47614d64ab57ba0a3c3ba3687b"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.405017 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" event={"ID":"0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f","Type":"ContainerStarted","Data":"18456bb7599f66bd1f95ec565e13ba07ff2d59b3e98384d5daf68bb11df57549"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.406470 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" event={"ID":"4602188b-0cc4-4b1e-80e8-a2f40fd43da0","Type":"ContainerStarted","Data":"6ff68ce302c33a2e44b3a2ef26a00ad83cf1b672af5d4a1044b9b8bd3b2643af"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.408497 4943 generic.go:334] "Generic (PLEG): container finished" podID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerID="f7e8783699bf8d37d3f22e38f99332000863f385e012ec802254f18059530c6b" exitCode=0 Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.408579 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8m6j4" event={"ID":"8a2fe372-bfe7-463e-9a75-1a04648b3da0","Type":"ContainerDied","Data":"f7e8783699bf8d37d3f22e38f99332000863f385e012ec802254f18059530c6b"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.410247 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" event={"ID":"f98a9e94-a4bf-4980-9ab0-efd202b4ee30","Type":"ContainerStarted","Data":"81f0da02cc731faed2885e29c5ad09d82956978130405e250725cb5b85887482"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.413594 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" event={"ID":"c35fc2d0-5f79-4edf-86ec-d6f1add18551","Type":"ContainerStarted","Data":"ac345b4f96f516b916a56e9cb55569186e26f7f225e7cb68e325d5a38fa4b399"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.415143 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" event={"ID":"882924cc-0259-4c98-b40a-1c02eeadaa09","Type":"ContainerStarted","Data":"220a5f00b884ebd3eb98a4237ac758dcc1c2024bdc362905fe756d7c2fb178b1"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.417388 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" event={"ID":"ac03d0ce-8e35-479d-9f8d-06e05955d2ce","Type":"ContainerStarted","Data":"23e715a4aaaf3a25d4af5601be6f61faf571a1accff78e18ccf745569b2f8b40"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.419259 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" event={"ID":"66d0d275-3a0b-45a1-8b57-dc7ec4559888","Type":"ContainerStarted","Data":"b4955a0771aa1ad3b737252c40766381d7ce4d236fc8e379bfb3ea78187d9d79"} Nov 29 07:03:16 crc 
kubenswrapper[4943]: I1129 07:03:16.420550 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" event={"ID":"c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0","Type":"ContainerStarted","Data":"6d30a0137b216720918af034d08f87417ab4e4c4c43728801fdcb907a90d16fa"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.425857 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" event={"ID":"868d0e27-4b0a-4cb0-a2a6-13d58e257c8f","Type":"ContainerStarted","Data":"0bd5c4ff48d34625d4520e47b8b16dd70c8865e9549f437fcbbefd8100c66586"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.430229 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" event={"ID":"b979c1f8-20ad-4694-98e5-674738e37f4c","Type":"ContainerStarted","Data":"2107d68669c680b2e96db60c877ff36c7724b5151781e1a49ba0495fa4ab966f"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.437132 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" event={"ID":"74ea0cf0-4b53-4342-9ea4-c2e4db748104","Type":"ContainerStarted","Data":"b4c5a3d6b2f0d19f3805a7337f708d39405816b73f09e443d3a7bb598194cffe"} Nov 29 07:03:16 crc kubenswrapper[4943]: I1129 07:03:16.446477 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" event={"ID":"5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55","Type":"ContainerStarted","Data":"d6f52fb71ff8de3229dff964d6e25f9a4593a17324dc225c64c064bf049519d7"} Nov 29 07:03:16 crc kubenswrapper[4943]: E1129 07:03:16.465879 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" podUID="d4923779-fef3-444b-9276-5ca610c71fd4" Nov 29 07:03:16 crc kubenswrapper[4943]: E1129 07:03:16.742128 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" podUID="ac03d0ce-8e35-479d-9f8d-06e05955d2ce" Nov 29 07:03:16 crc kubenswrapper[4943]: E1129 07:03:16.750767 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" podUID="882924cc-0259-4c98-b40a-1c02eeadaa09" Nov 29 07:03:16 crc kubenswrapper[4943]: E1129 07:03:16.810412 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" podUID="0ec91d76-10f1-458f-b999-6212f13f5e18" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.465805 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" 
event={"ID":"7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3","Type":"ContainerStarted","Data":"934aaa9725f8c38ff0dcee11d2d1f8d4b2f7321077014314c7255d9b0acf1cea"} Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.466454 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.467819 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" event={"ID":"0ec91d76-10f1-458f-b999-6212f13f5e18","Type":"ContainerStarted","Data":"7518f6d6c66446d233f44657a5ee2758784dfba22aed5fceedacc900cdc57662"} Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.472986 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.474069 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" event={"ID":"d7ba2d7b-5840-4cc7-95d9-1953d8c0250b","Type":"ContainerStarted","Data":"cf78baab2e9621d4e57d48fc58d97d76ae48a90582fb87f7f6e8d24563cbd6b7"} Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.474693 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.479222 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.488199 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" event={"ID":"980cdf9e-3763-4aca-a92a-1f4ca61c1501","Type":"ContainerStarted","Data":"df48f489e9ee547db4e06f390bd582c9022b4b41b16f67da1f81bd0295b91fa2"} Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.489071 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.493974 4943 generic.go:334] "Generic (PLEG): container finished" podID="d11578d8-5840-4b86-914c-673da047462b" containerID="c77ad4a0dad6d78c71c20ddf1b52466b89aacf0008552a47640b8287724d0045" exitCode=0 Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.494039 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5mpx" event={"ID":"d11578d8-5840-4b86-914c-673da047462b","Type":"ContainerDied","Data":"c77ad4a0dad6d78c71c20ddf1b52466b89aacf0008552a47640b8287724d0045"} Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.497854 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" event={"ID":"c9815594-e69f-411a-9bf1-b0c064eb5180","Type":"ContainerStarted","Data":"deea0be252f213155ce8c0ed68d02c3a4c26a77ba88aea2472ff98685e4cc39d"} Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.498424 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.500905 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.501837 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" event={"ID":"b60c121d-d0c2-4e2e-be92-6da802d74dd6","Type":"ContainerStarted","Data":"a7d00cffacfcdf29fdcf95f92de0062c3aa4d6114c50f867ede9d149e534a035"} Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.502786 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.507115 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-jsgws" podStartSLOduration=14.863016384 podStartE2EDuration="3m25.507077653s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:54.067817657 +0000 UTC m=+1568.997906410" lastFinishedPulling="2025-11-29 07:03:04.711878926 +0000 UTC m=+1759.641967679" observedRunningTime="2025-11-29 07:03:17.486009874 +0000 UTC m=+1772.416098647" watchObservedRunningTime="2025-11-29 07:03:17.507077653 +0000 UTC m=+1772.437166406" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.507319 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.507731 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" event={"ID":"b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7","Type":"ContainerStarted","Data":"c83fb4e01de2e6a76fde2d4ca90fc115817127f0d3ef680f6be47c5667554962"} Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.508836 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.508874 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.513551 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.565764 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.568189 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-rp427" podStartSLOduration=14.067945421 podStartE2EDuration="3m25.568166509s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.051033077 +0000 UTC m=+1567.981121830" lastFinishedPulling="2025-11-29 07:03:04.551254175 +0000 UTC m=+1759.481342918" observedRunningTime="2025-11-29 07:03:17.526862531 +0000 UTC m=+1772.456951284" watchObservedRunningTime="2025-11-29 07:03:17.568166509 +0000 UTC m=+1772.498255262" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.583723 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/manila-operator-controller-manager-6546668bfd-wchm4" podStartSLOduration=5.801370853 podStartE2EDuration="3m25.583701122s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.932990638 +0000 UTC m=+1568.863079391" lastFinishedPulling="2025-11-29 07:03:13.715320907 +0000 UTC m=+1768.645409660" observedRunningTime="2025-11-29 07:03:17.583006125 +0000 UTC m=+1772.513094888" watchObservedRunningTime="2025-11-29 07:03:17.583701122 +0000 UTC m=+1772.513789875" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.616019 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" podStartSLOduration=13.520870709 podStartE2EDuration="3m25.615995818s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:54.084460236 +0000 UTC m=+1569.014548989" lastFinishedPulling="2025-11-29 07:03:06.179585345 +0000 UTC m=+1761.109674098" observedRunningTime="2025-11-29 07:03:17.610959044 +0000 UTC m=+1772.541047817" watchObservedRunningTime="2025-11-29 07:03:17.615995818 +0000 UTC m=+1772.546084581" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.642905 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" podStartSLOduration=13.453619583 podStartE2EDuration="3m25.642883522s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.975223095 +0000 UTC m=+1568.905311848" lastFinishedPulling="2025-11-29 07:03:06.164487034 +0000 UTC m=+1761.094575787" observedRunningTime="2025-11-29 07:03:17.633652864 +0000 UTC m=+1772.563741627" watchObservedRunningTime="2025-11-29 07:03:17.642883522 +0000 UTC m=+1772.572972275" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.664069 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" podStartSLOduration=13.743863724 podStartE2EDuration="3m26.664050154s" podCreationTimestamp="2025-11-29 06:59:51 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.23361936 +0000 UTC m=+1568.163708103" lastFinishedPulling="2025-11-29 07:03:06.15380578 +0000 UTC m=+1761.083894533" observedRunningTime="2025-11-29 07:03:17.662963517 +0000 UTC m=+1772.593052270" watchObservedRunningTime="2025-11-29 07:03:17.664050154 +0000 UTC m=+1772.594138897" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.738544 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" podStartSLOduration=12.963864779 podStartE2EDuration="3m25.73851988s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.403790417 +0000 UTC m=+1568.333879170" lastFinishedPulling="2025-11-29 07:03:06.178445518 +0000 UTC m=+1761.108534271" observedRunningTime="2025-11-29 07:03:17.714785384 +0000 UTC m=+1772.644874157" watchObservedRunningTime="2025-11-29 07:03:17.73851988 +0000 UTC m=+1772.668608643" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.757422 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" podStartSLOduration=5.570351841 podStartE2EDuration="3m25.757347253s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 
06:59:53.980432753 +0000 UTC m=+1568.910521506" lastFinishedPulling="2025-11-29 07:03:14.167428165 +0000 UTC m=+1769.097516918" observedRunningTime="2025-11-29 07:03:17.740555569 +0000 UTC m=+1772.670644332" watchObservedRunningTime="2025-11-29 07:03:17.757347253 +0000 UTC m=+1772.687436006" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.768035 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-ll829" podStartSLOduration=7.468658938 podStartE2EDuration="3m25.768016957s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:54.083455462 +0000 UTC m=+1569.013544215" lastFinishedPulling="2025-11-29 07:03:12.382813471 +0000 UTC m=+1767.312902234" observedRunningTime="2025-11-29 07:03:17.761549797 +0000 UTC m=+1772.691638570" watchObservedRunningTime="2025-11-29 07:03:17.768016957 +0000 UTC m=+1772.698105710" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.785456 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" podStartSLOduration=12.811901113 podStartE2EDuration="3m25.785440056s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.204784121 +0000 UTC m=+1568.134872874" lastFinishedPulling="2025-11-29 07:03:06.178323064 +0000 UTC m=+1761.108411817" observedRunningTime="2025-11-29 07:03:17.783951289 +0000 UTC m=+1772.714040052" watchObservedRunningTime="2025-11-29 07:03:17.785440056 +0000 UTC m=+1772.715528819" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.804698 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" podStartSLOduration=5.6141766220000004 podStartE2EDuration="3m25.804682581s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.977309996 +0000 UTC m=+1568.907398749" lastFinishedPulling="2025-11-29 07:03:14.167815955 +0000 UTC m=+1769.097904708" observedRunningTime="2025-11-29 07:03:17.798156249 +0000 UTC m=+1772.728245012" watchObservedRunningTime="2025-11-29 07:03:17.804682581 +0000 UTC m=+1772.734771334" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.840415 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-94jbz" podStartSLOduration=15.886176907 podStartE2EDuration="3m26.840395442s" podCreationTimestamp="2025-11-29 06:59:51 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.88172041 +0000 UTC m=+1568.811809163" lastFinishedPulling="2025-11-29 07:03:04.835938945 +0000 UTC m=+1759.766027698" observedRunningTime="2025-11-29 07:03:17.837013438 +0000 UTC m=+1772.767102201" watchObservedRunningTime="2025-11-29 07:03:17.840395442 +0000 UTC m=+1772.770484195" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.886788 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" podStartSLOduration=6.538308468 podStartE2EDuration="3m26.886761175s" podCreationTimestamp="2025-11-29 06:59:51 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.817970844 +0000 UTC m=+1568.748059607" lastFinishedPulling="2025-11-29 07:03:14.166423561 +0000 UTC m=+1769.096512314" observedRunningTime="2025-11-29 07:03:17.863296606 +0000 UTC m=+1772.793385359" 
watchObservedRunningTime="2025-11-29 07:03:17.886761175 +0000 UTC m=+1772.816849938" Nov 29 07:03:17 crc kubenswrapper[4943]: I1129 07:03:17.954698 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-jgxw4" podStartSLOduration=7.547135405 podStartE2EDuration="3m25.954610588s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.977280706 +0000 UTC m=+1568.907369459" lastFinishedPulling="2025-11-29 07:03:12.384755889 +0000 UTC m=+1767.314844642" observedRunningTime="2025-11-29 07:03:17.951080421 +0000 UTC m=+1772.881169184" watchObservedRunningTime="2025-11-29 07:03:17.954610588 +0000 UTC m=+1772.884699351" Nov 29 07:03:18 crc kubenswrapper[4943]: I1129 07:03:18.514933 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" Nov 29 07:03:18 crc kubenswrapper[4943]: I1129 07:03:18.518069 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-6hb4z" Nov 29 07:03:19 crc kubenswrapper[4943]: I1129 07:03:19.523643 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-v2kbm" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.343198 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.346708 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-ncg7s" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.419295 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.422641 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-knfbt" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.627122 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.629525 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-546795bfb5-9dvgz" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.764504 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.766974 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tg7s9" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.780303 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.785981 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-hrvxb" Nov 29 07:03:22 crc 
kubenswrapper[4943]: I1129 07:03:22.815342 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" Nov 29 07:03:22 crc kubenswrapper[4943]: I1129 07:03:22.818396 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-jq6kn" Nov 29 07:03:27 crc kubenswrapper[4943]: I1129 07:03:27.326838 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:03:27 crc kubenswrapper[4943]: E1129 07:03:27.327380 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:03:27 crc kubenswrapper[4943]: I1129 07:03:27.579117 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" event={"ID":"0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f","Type":"ContainerStarted","Data":"49b8fa9c66cbb3cf5bf3da936bc3877a4a84907161c2cf84741534d5aa970ceb"} Nov 29 07:03:27 crc kubenswrapper[4943]: I1129 07:03:27.580586 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" event={"ID":"4602188b-0cc4-4b1e-80e8-a2f40fd43da0","Type":"ContainerStarted","Data":"79d3d7389f80ddd0b7bf12a555f481a434d39b132af881858ecaf3c88836f6a5"} Nov 29 07:03:28 crc kubenswrapper[4943]: I1129 07:03:28.590708 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" event={"ID":"c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0","Type":"ContainerStarted","Data":"ff7f5562e2627bcd1a902c0fe84de283a5cf762263fa60a75c9c0e2d27d3ec70"} Nov 29 07:03:29 crc kubenswrapper[4943]: I1129 07:03:29.597946 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" Nov 29 07:03:29 crc kubenswrapper[4943]: I1129 07:03:29.599685 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" Nov 29 07:03:29 crc kubenswrapper[4943]: I1129 07:03:29.627123 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-f4nk6" podStartSLOduration=17.543050355 podStartE2EDuration="3m37.627093931s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:54.084180019 +0000 UTC m=+1569.014268762" lastFinishedPulling="2025-11-29 07:03:14.168223585 +0000 UTC m=+1769.098312338" observedRunningTime="2025-11-29 07:03:29.619191726 +0000 UTC m=+1784.549280569" watchObservedRunningTime="2025-11-29 07:03:29.627093931 +0000 UTC m=+1784.557182724" Nov 29 07:03:40 crc kubenswrapper[4943]: I1129 07:03:40.327296 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:03:43 crc kubenswrapper[4943]: E1129 07:03:40.328101 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:03:43 crc kubenswrapper[4943]: I1129 07:03:42.708630 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" Nov 29 07:03:43 crc kubenswrapper[4943]: I1129 07:03:42.712013 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" Nov 29 07:03:43 crc kubenswrapper[4943]: I1129 07:03:42.737210 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-v6d5z" podStartSLOduration=30.325202982 podStartE2EDuration="3m50.737192487s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:53.755206964 +0000 UTC m=+1568.685295717" lastFinishedPulling="2025-11-29 07:03:14.167196469 +0000 UTC m=+1769.097285222" observedRunningTime="2025-11-29 07:03:42.730660727 +0000 UTC m=+1797.660749560" watchObservedRunningTime="2025-11-29 07:03:42.737192487 +0000 UTC m=+1797.667281230" Nov 29 07:03:43 crc kubenswrapper[4943]: I1129 07:03:43.719165 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" Nov 29 07:03:43 crc kubenswrapper[4943]: I1129 07:03:43.728283 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" Nov 29 07:03:43 crc kubenswrapper[4943]: I1129 07:03:43.743698 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-vbk9x" podStartSLOduration=31.794978803 podStartE2EDuration="3m51.743671335s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:54.219623915 +0000 UTC m=+1569.149712668" lastFinishedPulling="2025-11-29 07:03:14.168316447 +0000 UTC m=+1769.098405200" observedRunningTime="2025-11-29 07:03:43.742436914 +0000 UTC m=+1798.672525687" watchObservedRunningTime="2025-11-29 07:03:43.743671335 +0000 UTC m=+1798.673760128" Nov 29 07:03:52 crc kubenswrapper[4943]: I1129 07:03:52.327622 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:03:52 crc kubenswrapper[4943]: E1129 07:03:52.328603 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:03:59 crc kubenswrapper[4943]: I1129 07:03:59.837204 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" event={"ID":"0ec91d76-10f1-458f-b999-6212f13f5e18","Type":"ContainerStarted","Data":"f08ba4982136869042dd738da17f401e735e4272242b866b0f391118da6c04b6"} Nov 29 07:03:59 crc 
kubenswrapper[4943]: I1129 07:03:59.840303 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" event={"ID":"882924cc-0259-4c98-b40a-1c02eeadaa09","Type":"ContainerStarted","Data":"4259f4693b666c37fd21222845f67e62c9c8add4044a30119662aef6d60b19d3"} Nov 29 07:03:59 crc kubenswrapper[4943]: I1129 07:03:59.842829 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4pllj" event={"ID":"b42c5c6f-e81f-42a4-b303-686a695ab49c","Type":"ContainerStarted","Data":"aa98b09ea6e62a0f6c64957c6adbae1c86e3372555e144f779de60143252a57c"} Nov 29 07:03:59 crc kubenswrapper[4943]: I1129 07:03:59.845543 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" event={"ID":"d4923779-fef3-444b-9276-5ca610c71fd4","Type":"ContainerStarted","Data":"4e910ccd5f8c28cc86b725ef06dd5361ba849903ac67a2432ca2137820e2bb96"} Nov 29 07:03:59 crc kubenswrapper[4943]: I1129 07:03:59.871782 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4pllj" podStartSLOduration=52.980287129 podStartE2EDuration="1m49.871763751s" podCreationTimestamp="2025-11-29 07:02:10 +0000 UTC" firstStartedPulling="2025-11-29 07:02:59.360258949 +0000 UTC m=+1754.290347702" lastFinishedPulling="2025-11-29 07:03:56.251735571 +0000 UTC m=+1811.181824324" observedRunningTime="2025-11-29 07:03:59.866795438 +0000 UTC m=+1814.796884211" watchObservedRunningTime="2025-11-29 07:03:59.871763751 +0000 UTC m=+1814.801852514" Nov 29 07:04:00 crc kubenswrapper[4943]: I1129 07:04:00.854294 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 07:04:00 crc kubenswrapper[4943]: I1129 07:04:00.854356 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" Nov 29 07:04:00 crc kubenswrapper[4943]: I1129 07:04:00.854371 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 07:04:00 crc kubenswrapper[4943]: I1129 07:04:00.875608 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" podStartSLOduration=151.235220773 podStartE2EDuration="4m8.875533641s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 07:02:19.11383674 +0000 UTC m=+1714.043925493" lastFinishedPulling="2025-11-29 07:03:56.754149558 +0000 UTC m=+1811.684238361" observedRunningTime="2025-11-29 07:04:00.872092046 +0000 UTC m=+1815.802180799" watchObservedRunningTime="2025-11-29 07:04:00.875533641 +0000 UTC m=+1815.805622434" Nov 29 07:04:00 crc kubenswrapper[4943]: I1129 07:04:00.902584 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" podStartSLOduration=144.914465269 podStartE2EDuration="4m8.902540007s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 07:02:12.776698993 +0000 UTC m=+1707.706787746" lastFinishedPulling="2025-11-29 07:03:56.764773721 +0000 UTC m=+1811.694862484" observedRunningTime="2025-11-29 07:04:00.902462635 +0000 UTC m=+1815.832551438" 
watchObservedRunningTime="2025-11-29 07:04:00.902540007 +0000 UTC m=+1815.832628770" Nov 29 07:04:00 crc kubenswrapper[4943]: I1129 07:04:00.927361 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" podStartSLOduration=6.237974604 podStartE2EDuration="4m8.927343349s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:54.064784243 +0000 UTC m=+1568.994872996" lastFinishedPulling="2025-11-29 07:03:56.754152948 +0000 UTC m=+1811.684241741" observedRunningTime="2025-11-29 07:04:00.921795222 +0000 UTC m=+1815.851883995" watchObservedRunningTime="2025-11-29 07:04:00.927343349 +0000 UTC m=+1815.857432102" Nov 29 07:04:01 crc kubenswrapper[4943]: I1129 07:04:01.324921 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4pllj" Nov 29 07:04:01 crc kubenswrapper[4943]: I1129 07:04:01.325008 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4pllj" Nov 29 07:04:01 crc kubenswrapper[4943]: I1129 07:04:01.385795 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4pllj" Nov 29 07:04:06 crc kubenswrapper[4943]: I1129 07:04:06.327292 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:04:06 crc kubenswrapper[4943]: E1129 07:04:06.328112 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:04:07 crc kubenswrapper[4943]: I1129 07:04:07.900121 4943 generic.go:334] "Generic (PLEG): container finished" podID="d11578d8-5840-4b86-914c-673da047462b" containerID="5182ee7c3f59fc37d28efbf02370292dcbc7d1293ec1490106dab66188d541a9" exitCode=0 Nov 29 07:04:07 crc kubenswrapper[4943]: I1129 07:04:07.900178 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5mpx" event={"ID":"d11578d8-5840-4b86-914c-673da047462b","Type":"ContainerDied","Data":"5182ee7c3f59fc37d28efbf02370292dcbc7d1293ec1490106dab66188d541a9"} Nov 29 07:04:07 crc kubenswrapper[4943]: I1129 07:04:07.902946 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8m6j4" event={"ID":"8a2fe372-bfe7-463e-9a75-1a04648b3da0","Type":"ContainerStarted","Data":"35d8f39112d3a902bc367649beaab7bccf2ce64b38041ab7eb7bb2309fca6ee7"} Nov 29 07:04:07 crc kubenswrapper[4943]: I1129 07:04:07.906636 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" event={"ID":"ac03d0ce-8e35-479d-9f8d-06e05955d2ce","Type":"ContainerStarted","Data":"adedfea0f163edbb8675ecaa82c5e45c904d8b45d409dc8527c2636c2031f1a4"} Nov 29 07:04:07 crc kubenswrapper[4943]: I1129 07:04:07.906778 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" Nov 29 07:04:07 crc kubenswrapper[4943]: I1129 07:04:07.946784 4943 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" podStartSLOduration=3.637664206 podStartE2EDuration="4m15.946767507s" podCreationTimestamp="2025-11-29 06:59:52 +0000 UTC" firstStartedPulling="2025-11-29 06:59:54.070556155 +0000 UTC m=+1569.000644908" lastFinishedPulling="2025-11-29 07:04:06.379659456 +0000 UTC m=+1821.309748209" observedRunningTime="2025-11-29 07:04:07.941631291 +0000 UTC m=+1822.871720064" watchObservedRunningTime="2025-11-29 07:04:07.946767507 +0000 UTC m=+1822.876856260" Nov 29 07:04:07 crc kubenswrapper[4943]: I1129 07:04:07.968819 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8m6j4" podStartSLOduration=49.458285999 podStartE2EDuration="1m53.968800561s" podCreationTimestamp="2025-11-29 07:02:14 +0000 UTC" firstStartedPulling="2025-11-29 07:03:01.815001836 +0000 UTC m=+1756.745090589" lastFinishedPulling="2025-11-29 07:04:06.325516408 +0000 UTC m=+1821.255605151" observedRunningTime="2025-11-29 07:04:07.960926677 +0000 UTC m=+1822.891015440" watchObservedRunningTime="2025-11-29 07:04:07.968800561 +0000 UTC m=+1822.898889314" Nov 29 07:04:08 crc kubenswrapper[4943]: I1129 07:04:08.089529 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-kg4v7" Nov 29 07:04:08 crc kubenswrapper[4943]: I1129 07:04:08.484493 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw" Nov 29 07:04:11 crc kubenswrapper[4943]: I1129 07:04:11.367859 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4pllj" Nov 29 07:04:11 crc kubenswrapper[4943]: I1129 07:04:11.411674 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4pllj"] Nov 29 07:04:11 crc kubenswrapper[4943]: I1129 07:04:11.927873 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4pllj" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerName="registry-server" containerID="cri-o://aa98b09ea6e62a0f6c64957c6adbae1c86e3372555e144f779de60143252a57c" gracePeriod=2 Nov 29 07:04:12 crc kubenswrapper[4943]: I1129 07:04:12.816724 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-zqc94" Nov 29 07:04:12 crc kubenswrapper[4943]: I1129 07:04:12.867147 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-bwq9n" Nov 29 07:04:12 crc kubenswrapper[4943]: I1129 07:04:12.961197 4943 generic.go:334] "Generic (PLEG): container finished" podID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerID="aa98b09ea6e62a0f6c64957c6adbae1c86e3372555e144f779de60143252a57c" exitCode=0 Nov 29 07:04:12 crc kubenswrapper[4943]: I1129 07:04:12.961249 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4pllj" event={"ID":"b42c5c6f-e81f-42a4-b303-686a695ab49c","Type":"ContainerDied","Data":"aa98b09ea6e62a0f6c64957c6adbae1c86e3372555e144f779de60143252a57c"} Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.439830 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4pllj" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.487821 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk9mm\" (UniqueName: \"kubernetes.io/projected/b42c5c6f-e81f-42a4-b303-686a695ab49c-kube-api-access-wk9mm\") pod \"b42c5c6f-e81f-42a4-b303-686a695ab49c\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.487995 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-catalog-content\") pod \"b42c5c6f-e81f-42a4-b303-686a695ab49c\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.488073 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-utilities\") pod \"b42c5c6f-e81f-42a4-b303-686a695ab49c\" (UID: \"b42c5c6f-e81f-42a4-b303-686a695ab49c\") " Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.489083 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-utilities" (OuterVolumeSpecName: "utilities") pod "b42c5c6f-e81f-42a4-b303-686a695ab49c" (UID: "b42c5c6f-e81f-42a4-b303-686a695ab49c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.494490 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b42c5c6f-e81f-42a4-b303-686a695ab49c-kube-api-access-wk9mm" (OuterVolumeSpecName: "kube-api-access-wk9mm") pod "b42c5c6f-e81f-42a4-b303-686a695ab49c" (UID: "b42c5c6f-e81f-42a4-b303-686a695ab49c"). InnerVolumeSpecName "kube-api-access-wk9mm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.509069 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b42c5c6f-e81f-42a4-b303-686a695ab49c" (UID: "b42c5c6f-e81f-42a4-b303-686a695ab49c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.590034 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.590073 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b42c5c6f-e81f-42a4-b303-686a695ab49c-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.590083 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk9mm\" (UniqueName: \"kubernetes.io/projected/b42c5c6f-e81f-42a4-b303-686a695ab49c-kube-api-access-wk9mm\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.971289 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5mpx" event={"ID":"d11578d8-5840-4b86-914c-673da047462b","Type":"ContainerStarted","Data":"736db86b57e2cde1948c5f89866e24b4b8015dcb2e9b83f06d56c997872a0756"} Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.973544 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4pllj" event={"ID":"b42c5c6f-e81f-42a4-b303-686a695ab49c","Type":"ContainerDied","Data":"c3243940c1af51f274db0d61ec103b0d534c533194a87cff6390ac0a115d5468"} Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.973613 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4pllj" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.973623 4943 scope.go:117] "RemoveContainer" containerID="aa98b09ea6e62a0f6c64957c6adbae1c86e3372555e144f779de60143252a57c" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.992732 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:04:13 crc kubenswrapper[4943]: I1129 07:04:13.992780 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:04:14 crc kubenswrapper[4943]: I1129 07:04:14.001399 4943 scope.go:117] "RemoveContainer" containerID="011ed3c51b8af0405ee8eccbce935e705aa2431acbef0c319513ec42776a76dc" Nov 29 07:04:14 crc kubenswrapper[4943]: I1129 07:04:14.005338 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c5mpx" podStartSLOduration=37.846567992 podStartE2EDuration="1m1.005319022s" podCreationTimestamp="2025-11-29 07:03:13 +0000 UTC" firstStartedPulling="2025-11-29 07:03:49.115249614 +0000 UTC m=+1804.045338377" lastFinishedPulling="2025-11-29 07:04:12.274000654 +0000 UTC m=+1827.204089407" observedRunningTime="2025-11-29 07:04:14.001733764 +0000 UTC m=+1828.931822517" watchObservedRunningTime="2025-11-29 07:04:14.005319022 +0000 UTC m=+1828.935407775" Nov 29 07:04:14 crc kubenswrapper[4943]: I1129 07:04:14.022804 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4pllj"] Nov 29 07:04:14 crc kubenswrapper[4943]: I1129 07:04:14.027868 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4pllj"] Nov 29 07:04:14 crc kubenswrapper[4943]: I1129 07:04:14.045162 4943 scope.go:117] "RemoveContainer" 
containerID="8626f0dfa939862d112566f5f942c4bce9d25410da9aa43a1ae5fee7920973fa" Nov 29 07:04:14 crc kubenswrapper[4943]: I1129 07:04:14.724045 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8m6j4" Nov 29 07:04:14 crc kubenswrapper[4943]: I1129 07:04:14.724100 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8m6j4" Nov 29 07:04:14 crc kubenswrapper[4943]: I1129 07:04:14.765441 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8m6j4" Nov 29 07:04:15 crc kubenswrapper[4943]: I1129 07:04:15.019018 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8m6j4" Nov 29 07:04:15 crc kubenswrapper[4943]: I1129 07:04:15.067417 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-c5mpx" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="registry-server" probeResult="failure" output=< Nov 29 07:04:15 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 07:04:15 crc kubenswrapper[4943]: > Nov 29 07:04:15 crc kubenswrapper[4943]: I1129 07:04:15.336171 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" path="/var/lib/kubelet/pods/b42c5c6f-e81f-42a4-b303-686a695ab49c/volumes" Nov 29 07:04:18 crc kubenswrapper[4943]: I1129 07:04:18.206275 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8m6j4"] Nov 29 07:04:18 crc kubenswrapper[4943]: I1129 07:04:18.206800 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8m6j4" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerName="registry-server" containerID="cri-o://35d8f39112d3a902bc367649beaab7bccf2ce64b38041ab7eb7bb2309fca6ee7" gracePeriod=2 Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.008880 4943 generic.go:334] "Generic (PLEG): container finished" podID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerID="35d8f39112d3a902bc367649beaab7bccf2ce64b38041ab7eb7bb2309fca6ee7" exitCode=0 Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.008933 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8m6j4" event={"ID":"8a2fe372-bfe7-463e-9a75-1a04648b3da0","Type":"ContainerDied","Data":"35d8f39112d3a902bc367649beaab7bccf2ce64b38041ab7eb7bb2309fca6ee7"} Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.327227 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:04:19 crc kubenswrapper[4943]: E1129 07:04:19.327652 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.791994 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8m6j4" Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.877872 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-utilities\") pod \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.877948 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-catalog-content\") pod \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.878001 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7nbk\" (UniqueName: \"kubernetes.io/projected/8a2fe372-bfe7-463e-9a75-1a04648b3da0-kube-api-access-n7nbk\") pod \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\" (UID: \"8a2fe372-bfe7-463e-9a75-1a04648b3da0\") " Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.878826 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-utilities" (OuterVolumeSpecName: "utilities") pod "8a2fe372-bfe7-463e-9a75-1a04648b3da0" (UID: "8a2fe372-bfe7-463e-9a75-1a04648b3da0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.882937 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a2fe372-bfe7-463e-9a75-1a04648b3da0-kube-api-access-n7nbk" (OuterVolumeSpecName: "kube-api-access-n7nbk") pod "8a2fe372-bfe7-463e-9a75-1a04648b3da0" (UID: "8a2fe372-bfe7-463e-9a75-1a04648b3da0"). InnerVolumeSpecName "kube-api-access-n7nbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.925469 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a2fe372-bfe7-463e-9a75-1a04648b3da0" (UID: "8a2fe372-bfe7-463e-9a75-1a04648b3da0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.979250 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.979286 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a2fe372-bfe7-463e-9a75-1a04648b3da0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:19 crc kubenswrapper[4943]: I1129 07:04:19.979297 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7nbk\" (UniqueName: \"kubernetes.io/projected/8a2fe372-bfe7-463e-9a75-1a04648b3da0-kube-api-access-n7nbk\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:20 crc kubenswrapper[4943]: I1129 07:04:20.022854 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8m6j4" event={"ID":"8a2fe372-bfe7-463e-9a75-1a04648b3da0","Type":"ContainerDied","Data":"e448be6dcc273fa0ddbb00befcd888dfa8946ff13217e9a1bfc97b63a132e3fd"} Nov 29 07:04:20 crc kubenswrapper[4943]: I1129 07:04:20.022920 4943 scope.go:117] "RemoveContainer" containerID="35d8f39112d3a902bc367649beaab7bccf2ce64b38041ab7eb7bb2309fca6ee7" Nov 29 07:04:20 crc kubenswrapper[4943]: I1129 07:04:20.022925 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8m6j4" Nov 29 07:04:20 crc kubenswrapper[4943]: I1129 07:04:20.048522 4943 scope.go:117] "RemoveContainer" containerID="f7e8783699bf8d37d3f22e38f99332000863f385e012ec802254f18059530c6b" Nov 29 07:04:20 crc kubenswrapper[4943]: I1129 07:04:20.056974 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8m6j4"] Nov 29 07:04:20 crc kubenswrapper[4943]: I1129 07:04:20.063494 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8m6j4"] Nov 29 07:04:20 crc kubenswrapper[4943]: I1129 07:04:20.080962 4943 scope.go:117] "RemoveContainer" containerID="63954b7d3f211f2a744658394a7079cddebf8de110740320373a2c8bab62d7fd" Nov 29 07:04:21 crc kubenswrapper[4943]: I1129 07:04:21.339710 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" path="/var/lib/kubelet/pods/8a2fe372-bfe7-463e-9a75-1a04648b3da0/volumes" Nov 29 07:04:24 crc kubenswrapper[4943]: I1129 07:04:24.038684 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:04:24 crc kubenswrapper[4943]: I1129 07:04:24.083127 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:04:24 crc kubenswrapper[4943]: I1129 07:04:24.282766 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c5mpx"] Nov 29 07:04:25 crc kubenswrapper[4943]: I1129 07:04:25.059097 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c5mpx" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="registry-server" containerID="cri-o://736db86b57e2cde1948c5f89866e24b4b8015dcb2e9b83f06d56c997872a0756" gracePeriod=2 Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.068751 4943 generic.go:334] "Generic (PLEG): 
container finished" podID="d11578d8-5840-4b86-914c-673da047462b" containerID="736db86b57e2cde1948c5f89866e24b4b8015dcb2e9b83f06d56c997872a0756" exitCode=0 Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.068824 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5mpx" event={"ID":"d11578d8-5840-4b86-914c-673da047462b","Type":"ContainerDied","Data":"736db86b57e2cde1948c5f89866e24b4b8015dcb2e9b83f06d56c997872a0756"} Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.578902 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.673478 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gq2tn\" (UniqueName: \"kubernetes.io/projected/d11578d8-5840-4b86-914c-673da047462b-kube-api-access-gq2tn\") pod \"d11578d8-5840-4b86-914c-673da047462b\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.673582 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-catalog-content\") pod \"d11578d8-5840-4b86-914c-673da047462b\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.673624 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-utilities\") pod \"d11578d8-5840-4b86-914c-673da047462b\" (UID: \"d11578d8-5840-4b86-914c-673da047462b\") " Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.674780 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-utilities" (OuterVolumeSpecName: "utilities") pod "d11578d8-5840-4b86-914c-673da047462b" (UID: "d11578d8-5840-4b86-914c-673da047462b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.680912 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d11578d8-5840-4b86-914c-673da047462b-kube-api-access-gq2tn" (OuterVolumeSpecName: "kube-api-access-gq2tn") pod "d11578d8-5840-4b86-914c-673da047462b" (UID: "d11578d8-5840-4b86-914c-673da047462b"). InnerVolumeSpecName "kube-api-access-gq2tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.728526 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d11578d8-5840-4b86-914c-673da047462b" (UID: "d11578d8-5840-4b86-914c-673da047462b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.775522 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.775577 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gq2tn\" (UniqueName: \"kubernetes.io/projected/d11578d8-5840-4b86-914c-673da047462b-kube-api-access-gq2tn\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:26 crc kubenswrapper[4943]: I1129 07:04:26.775593 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d11578d8-5840-4b86-914c-673da047462b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.080090 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5mpx" event={"ID":"d11578d8-5840-4b86-914c-673da047462b","Type":"ContainerDied","Data":"6704ca5a6ea9d8f010b34ad1b454b5327e5d51d99ba688cb6a83eccfaaa658dc"} Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.080198 4943 scope.go:117] "RemoveContainer" containerID="736db86b57e2cde1948c5f89866e24b4b8015dcb2e9b83f06d56c997872a0756" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.080157 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c5mpx" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.102933 4943 scope.go:117] "RemoveContainer" containerID="5182ee7c3f59fc37d28efbf02370292dcbc7d1293ec1490106dab66188d541a9" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.117746 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c5mpx"] Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.122541 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c5mpx"] Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.141965 4943 scope.go:117] "RemoveContainer" containerID="c77ad4a0dad6d78c71c20ddf1b52466b89aacf0008552a47640b8287724d0045" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.339645 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d11578d8-5840-4b86-914c-673da047462b" path="/var/lib/kubelet/pods/d11578d8-5840-4b86-914c-673da047462b/volumes" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793155 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7gzb2"] Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793477 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793495 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793512 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerName="extract-content" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793524 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerName="extract-content" Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793539 4943 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerName="extract-content" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793547 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerName="extract-content" Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793579 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerName="extract-utilities" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793586 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerName="extract-utilities" Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793600 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerName="extract-utilities" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793606 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerName="extract-utilities" Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793620 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793627 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793641 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="extract-content" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793647 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="extract-content" Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793656 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="extract-utilities" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793661 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="extract-utilities" Nov 29 07:04:27 crc kubenswrapper[4943]: E1129 07:04:27.793669 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793674 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793823 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a2fe372-bfe7-463e-9a75-1a04648b3da0" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793837 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b42c5c6f-e81f-42a4-b303-686a695ab49c" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.793851 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d11578d8-5840-4b86-914c-673da047462b" containerName="registry-server" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.794618 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.796589 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-8jd78" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.797388 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.797488 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.798201 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.819531 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7gzb2"] Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.875333 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8tlj7"] Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.877162 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.880866 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.890811 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8tlj7"] Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.894502 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-config\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.894542 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.894617 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhl8w\" (UniqueName: \"kubernetes.io/projected/441e3d11-b7c0-4c0b-9db7-18eeb2782242-kube-api-access-mhl8w\") pod \"dnsmasq-dns-675f4bcbfc-7gzb2\" (UID: \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.894648 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/441e3d11-b7c0-4c0b-9db7-18eeb2782242-config\") pod \"dnsmasq-dns-675f4bcbfc-7gzb2\" (UID: \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.894745 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvgnj\" (UniqueName: \"kubernetes.io/projected/9906bc4a-bff0-4622-b0de-79a0e03dda90-kube-api-access-hvgnj\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " 
pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.998038 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvgnj\" (UniqueName: \"kubernetes.io/projected/9906bc4a-bff0-4622-b0de-79a0e03dda90-kube-api-access-hvgnj\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.998198 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-config\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.998234 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.998542 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhl8w\" (UniqueName: \"kubernetes.io/projected/441e3d11-b7c0-4c0b-9db7-18eeb2782242-kube-api-access-mhl8w\") pod \"dnsmasq-dns-675f4bcbfc-7gzb2\" (UID: \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.998596 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/441e3d11-b7c0-4c0b-9db7-18eeb2782242-config\") pod \"dnsmasq-dns-675f4bcbfc-7gzb2\" (UID: \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.999674 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/441e3d11-b7c0-4c0b-9db7-18eeb2782242-config\") pod \"dnsmasq-dns-675f4bcbfc-7gzb2\" (UID: \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.999668 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-config\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:27 crc kubenswrapper[4943]: I1129 07:04:27.999787 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:28 crc kubenswrapper[4943]: I1129 07:04:28.019533 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvgnj\" (UniqueName: \"kubernetes.io/projected/9906bc4a-bff0-4622-b0de-79a0e03dda90-kube-api-access-hvgnj\") pod \"dnsmasq-dns-78dd6ddcc-8tlj7\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:28 crc kubenswrapper[4943]: I1129 07:04:28.037047 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mhl8w\" (UniqueName: \"kubernetes.io/projected/441e3d11-b7c0-4c0b-9db7-18eeb2782242-kube-api-access-mhl8w\") pod \"dnsmasq-dns-675f4bcbfc-7gzb2\" (UID: \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:04:28 crc kubenswrapper[4943]: I1129 07:04:28.112953 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:04:28 crc kubenswrapper[4943]: I1129 07:04:28.198418 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:04:28 crc kubenswrapper[4943]: I1129 07:04:28.606744 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7gzb2"] Nov 29 07:04:28 crc kubenswrapper[4943]: I1129 07:04:28.731110 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8tlj7"] Nov 29 07:04:28 crc kubenswrapper[4943]: W1129 07:04:28.733002 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9906bc4a_bff0_4622_b0de_79a0e03dda90.slice/crio-e4ed43579cb576b3db0494b4e16a03664302b83b820a44ddf8c7afe3ccd21b32 WatchSource:0}: Error finding container e4ed43579cb576b3db0494b4e16a03664302b83b820a44ddf8c7afe3ccd21b32: Status 404 returned error can't find the container with id e4ed43579cb576b3db0494b4e16a03664302b83b820a44ddf8c7afe3ccd21b32 Nov 29 07:04:29 crc kubenswrapper[4943]: I1129 07:04:29.097884 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" event={"ID":"9906bc4a-bff0-4622-b0de-79a0e03dda90","Type":"ContainerStarted","Data":"e4ed43579cb576b3db0494b4e16a03664302b83b820a44ddf8c7afe3ccd21b32"} Nov 29 07:04:29 crc kubenswrapper[4943]: I1129 07:04:29.099121 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" event={"ID":"441e3d11-b7c0-4c0b-9db7-18eeb2782242","Type":"ContainerStarted","Data":"cd85c804bb05b035a00ab18d7f2e6b529d62f09c189db132988a0b3f98f227c6"} Nov 29 07:04:30 crc kubenswrapper[4943]: I1129 07:04:30.971927 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7gzb2"] Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.017354 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tmzm2"] Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.022503 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.061467 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tmzm2"] Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.147875 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jz27\" (UniqueName: \"kubernetes.io/projected/749a7a01-9c52-4138-84e2-90d383b8faee-kube-api-access-4jz27\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.147922 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.147989 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-dns-svc\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.250324 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jz27\" (UniqueName: \"kubernetes.io/projected/749a7a01-9c52-4138-84e2-90d383b8faee-kube-api-access-4jz27\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.250374 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.250434 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-dns-svc\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.255281 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-dns-svc\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.255326 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.277325 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jz27\" (UniqueName: 
\"kubernetes.io/projected/749a7a01-9c52-4138-84e2-90d383b8faee-kube-api-access-4jz27\") pod \"dnsmasq-dns-666b6646f7-tmzm2\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") " pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.329283 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:04:31 crc kubenswrapper[4943]: E1129 07:04:31.329516 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.353041 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.365706 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8tlj7"] Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.379757 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-86p65"] Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.381340 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.385405 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-86p65"] Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.555489 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.555532 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kc22s\" (UniqueName: \"kubernetes.io/projected/6a278192-6447-4d10-93f2-907a904d36dc-kube-api-access-kc22s\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.555554 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-config\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.656557 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.656640 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kc22s\" (UniqueName: 
\"kubernetes.io/projected/6a278192-6447-4d10-93f2-907a904d36dc-kube-api-access-kc22s\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.656684 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-config\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.657477 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.657644 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-config\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.676080 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kc22s\" (UniqueName: \"kubernetes.io/projected/6a278192-6447-4d10-93f2-907a904d36dc-kube-api-access-kc22s\") pod \"dnsmasq-dns-57d769cc4f-86p65\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:31 crc kubenswrapper[4943]: I1129 07:04:31.705616 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.365822 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-86p65"] Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.446871 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tmzm2"] Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.864679 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.866184 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.869417 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.869687 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.869807 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.873018 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.873759 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.873874 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.876901 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-xnlbn" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.886266 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.887656 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.894059 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.894118 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-9r7k8" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.894308 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.894059 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.894473 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.894544 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.899280 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.906156 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.911456 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.980777 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.980846 
4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.980872 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.980910 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.980932 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.980953 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.980979 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.980994 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.981015 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 07:04:32.981034 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:32 crc kubenswrapper[4943]: I1129 
07:04:32.981057 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x9v2\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-kube-api-access-7x9v2\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.081930 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.081991 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-config-data\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082013 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082046 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082073 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5783e23b-47b8-4bbe-99aa-29271dc74d51-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082107 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082136 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082165 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082194 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5783e23b-47b8-4bbe-99aa-29271dc74d51-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082217 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082316 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082353 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082375 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clkpj\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-kube-api-access-clkpj\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082397 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082420 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082448 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082477 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082509 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x9v2\" (UniqueName: 
\"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-kube-api-access-7x9v2\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082542 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082610 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082640 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.082664 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.083587 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.084454 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.084994 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.085926 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.086096 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") device mount 
path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.086178 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.105283 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x9v2\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-kube-api-access-7x9v2\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.112912 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.112912 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.115118 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.115226 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.138526 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.189557 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.189793 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.189824 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-config-data\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.189871 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.189903 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.189941 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.189926 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5783e23b-47b8-4bbe-99aa-29271dc74d51-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.190308 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5783e23b-47b8-4bbe-99aa-29271dc74d51-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.190338 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.190398 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.190423 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clkpj\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-kube-api-access-clkpj\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.190445 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc 
kubenswrapper[4943]: I1129 07:04:33.190500 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.190919 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.191417 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.191674 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-config-data\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.192325 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.201137 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.202179 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.202881 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5783e23b-47b8-4bbe-99aa-29271dc74d51-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.206604 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5783e23b-47b8-4bbe-99aa-29271dc74d51-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.206627 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clkpj\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-kube-api-access-clkpj\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " 
pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.221877 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " pod="openstack/rabbitmq-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.246828 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:04:33 crc kubenswrapper[4943]: I1129 07:04:33.269006 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.055093 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.063224 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.068510 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.068817 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.069081 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-2gmpz" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.071078 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.075594 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.081848 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.211915 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b1dafb-1dfa-4f23-8335-50600bc5becb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.211981 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jkk8\" (UniqueName: \"kubernetes.io/projected/57b1dafb-1dfa-4f23-8335-50600bc5becb-kube-api-access-2jkk8\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.212015 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/57b1dafb-1dfa-4f23-8335-50600bc5becb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.212036 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-config-data-default\") pod \"openstack-galera-0\" 
(UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.212061 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-operator-scripts\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.212119 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-kolla-config\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.212150 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.212181 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57b1dafb-1dfa-4f23-8335-50600bc5becb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.314583 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b1dafb-1dfa-4f23-8335-50600bc5becb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.314643 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jkk8\" (UniqueName: \"kubernetes.io/projected/57b1dafb-1dfa-4f23-8335-50600bc5becb-kube-api-access-2jkk8\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.314689 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/57b1dafb-1dfa-4f23-8335-50600bc5becb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.314713 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-config-data-default\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.314743 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-operator-scripts\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 
07:04:34.314801 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-kolla-config\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.314841 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.314878 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57b1dafb-1dfa-4f23-8335-50600bc5becb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.316636 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.321735 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57b1dafb-1dfa-4f23-8335-50600bc5becb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.323018 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b1dafb-1dfa-4f23-8335-50600bc5becb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.324013 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-config-data-default\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.331213 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/57b1dafb-1dfa-4f23-8335-50600bc5becb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.331390 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-kolla-config\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.333016 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57b1dafb-1dfa-4f23-8335-50600bc5becb-operator-scripts\") pod \"openstack-galera-0\" (UID: 
\"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.335309 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jkk8\" (UniqueName: \"kubernetes.io/projected/57b1dafb-1dfa-4f23-8335-50600bc5becb-kube-api-access-2jkk8\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.348083 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"57b1dafb-1dfa-4f23-8335-50600bc5becb\") " pod="openstack/openstack-galera-0" Nov 29 07:04:34 crc kubenswrapper[4943]: I1129 07:04:34.398771 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.493952 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.495159 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.498753 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.498834 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.499020 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.502099 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-nsdk7" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.517543 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.639448 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.641710 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.644253 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.644616 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.647042 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-sdqs5" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.647858 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.647946 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.648001 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a756fe5f-1037-4ec3-b91a-fdce5d723f04-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.648051 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.648107 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.648136 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljkk7\" (UniqueName: \"kubernetes.io/projected/a756fe5f-1037-4ec3-b91a-fdce5d723f04-kube-api-access-ljkk7\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.648157 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a756fe5f-1037-4ec3-b91a-fdce5d723f04-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.648175 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a756fe5f-1037-4ec3-b91a-fdce5d723f04-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.666279 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749143 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749254 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749292 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2be496c-a331-4baf-b42b-453be5225812-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749312 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2be496c-a331-4baf-b42b-453be5225812-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749350 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljkk7\" (UniqueName: \"kubernetes.io/projected/a756fe5f-1037-4ec3-b91a-fdce5d723f04-kube-api-access-ljkk7\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749378 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a756fe5f-1037-4ec3-b91a-fdce5d723f04-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749403 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a756fe5f-1037-4ec3-b91a-fdce5d723f04-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749431 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749468 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.749584 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.750041 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a756fe5f-1037-4ec3-b91a-fdce5d723f04-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.750354 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a756fe5f-1037-4ec3-b91a-fdce5d723f04-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.750436 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a2be496c-a331-4baf-b42b-453be5225812-kolla-config\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.750493 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2be496c-a331-4baf-b42b-453be5225812-config-data\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.750540 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfrtq\" (UniqueName: \"kubernetes.io/projected/a2be496c-a331-4baf-b42b-453be5225812-kube-api-access-wfrtq\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.750775 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.751928 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.754536 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/a756fe5f-1037-4ec3-b91a-fdce5d723f04-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.758839 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a756fe5f-1037-4ec3-b91a-fdce5d723f04-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.762681 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a756fe5f-1037-4ec3-b91a-fdce5d723f04-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.778394 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljkk7\" (UniqueName: \"kubernetes.io/projected/a756fe5f-1037-4ec3-b91a-fdce5d723f04-kube-api-access-ljkk7\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.781095 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a756fe5f-1037-4ec3-b91a-fdce5d723f04\") " pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.828404 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.852023 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2be496c-a331-4baf-b42b-453be5225812-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.852073 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2be496c-a331-4baf-b42b-453be5225812-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.852180 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a2be496c-a331-4baf-b42b-453be5225812-kolla-config\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.852212 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2be496c-a331-4baf-b42b-453be5225812-config-data\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.852249 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfrtq\" (UniqueName: \"kubernetes.io/projected/a2be496c-a331-4baf-b42b-453be5225812-kube-api-access-wfrtq\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.853626 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a2be496c-a331-4baf-b42b-453be5225812-kolla-config\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.853962 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2be496c-a331-4baf-b42b-453be5225812-config-data\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.859006 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2be496c-a331-4baf-b42b-453be5225812-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.870166 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2be496c-a331-4baf-b42b-453be5225812-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.883483 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfrtq\" (UniqueName: \"kubernetes.io/projected/a2be496c-a331-4baf-b42b-453be5225812-kube-api-access-wfrtq\") pod \"memcached-0\" (UID: 
\"a2be496c-a331-4baf-b42b-453be5225812\") " pod="openstack/memcached-0" Nov 29 07:04:35 crc kubenswrapper[4943]: I1129 07:04:35.961949 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.169430 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" event={"ID":"749a7a01-9c52-4138-84e2-90d383b8faee","Type":"ContainerStarted","Data":"b7aa59befe6aa14e2c62535d3faa2d6e4da4444511cc44a9e9db311e33de6e4d"} Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.171834 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" event={"ID":"6a278192-6447-4d10-93f2-907a904d36dc","Type":"ContainerStarted","Data":"af1dd5735ad8b248e702cd833f3815904a0d1572d361d3398a840e9a3ab48502"} Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.443622 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.446389 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.451757 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-lfljv" Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.483478 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.584315 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trmfj\" (UniqueName: \"kubernetes.io/projected/849c4c54-e078-43b4-8137-afe141df50cf-kube-api-access-trmfj\") pod \"kube-state-metrics-0\" (UID: \"849c4c54-e078-43b4-8137-afe141df50cf\") " pod="openstack/kube-state-metrics-0" Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.687249 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trmfj\" (UniqueName: \"kubernetes.io/projected/849c4c54-e078-43b4-8137-afe141df50cf-kube-api-access-trmfj\") pod \"kube-state-metrics-0\" (UID: \"849c4c54-e078-43b4-8137-afe141df50cf\") " pod="openstack/kube-state-metrics-0" Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.718829 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trmfj\" (UniqueName: \"kubernetes.io/projected/849c4c54-e078-43b4-8137-afe141df50cf-kube-api-access-trmfj\") pod \"kube-state-metrics-0\" (UID: \"849c4c54-e078-43b4-8137-afe141df50cf\") " pod="openstack/kube-state-metrics-0" Nov 29 07:04:37 crc kubenswrapper[4943]: I1129 07:04:37.771863 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.644548 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5gpxg"] Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.646138 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.648335 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.648344 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.650013 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-dfvhz" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.654068 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5gpxg"] Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.670406 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-gfvtr"] Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.672188 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.692273 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-gfvtr"] Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762011 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-log\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762256 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af59b739-6a81-44bf-a1f6-2d6d3038c43f-combined-ca-bundle\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762348 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af59b739-6a81-44bf-a1f6-2d6d3038c43f-scripts\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762420 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-run\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762544 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-lib\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762716 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-run-ovn\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " 
pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762813 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-log-ovn\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762844 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/af59b739-6a81-44bf-a1f6-2d6d3038c43f-ovn-controller-tls-certs\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762876 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6qt5\" (UniqueName: \"kubernetes.io/projected/af59b739-6a81-44bf-a1f6-2d6d3038c43f-kube-api-access-d6qt5\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762900 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knd7l\" (UniqueName: \"kubernetes.io/projected/1311e160-08fd-4e7e-9599-031cdf056c62-kube-api-access-knd7l\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762918 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1311e160-08fd-4e7e-9599-031cdf056c62-scripts\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762937 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-run\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.762973 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-etc-ovs\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.864865 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-log-ovn\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865208 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/af59b739-6a81-44bf-a1f6-2d6d3038c43f-ovn-controller-tls-certs\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " 
pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865247 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6qt5\" (UniqueName: \"kubernetes.io/projected/af59b739-6a81-44bf-a1f6-2d6d3038c43f-kube-api-access-d6qt5\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865279 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knd7l\" (UniqueName: \"kubernetes.io/projected/1311e160-08fd-4e7e-9599-031cdf056c62-kube-api-access-knd7l\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865306 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1311e160-08fd-4e7e-9599-031cdf056c62-scripts\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865339 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-run\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865394 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-etc-ovs\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865430 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-log\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865433 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-log-ovn\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865453 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af59b739-6a81-44bf-a1f6-2d6d3038c43f-combined-ca-bundle\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865487 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af59b739-6a81-44bf-a1f6-2d6d3038c43f-scripts\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865508 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-run\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865524 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-lib\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865551 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-run-ovn\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865820 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-run-ovn\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.865904 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af59b739-6a81-44bf-a1f6-2d6d3038c43f-var-run\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.866012 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-etc-ovs\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.866059 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-run\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.866193 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-lib\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.866608 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1311e160-08fd-4e7e-9599-031cdf056c62-var-log\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.867918 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1311e160-08fd-4e7e-9599-031cdf056c62-scripts\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.869334 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af59b739-6a81-44bf-a1f6-2d6d3038c43f-scripts\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.872914 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af59b739-6a81-44bf-a1f6-2d6d3038c43f-combined-ca-bundle\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.873151 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/af59b739-6a81-44bf-a1f6-2d6d3038c43f-ovn-controller-tls-certs\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.883062 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6qt5\" (UniqueName: \"kubernetes.io/projected/af59b739-6a81-44bf-a1f6-2d6d3038c43f-kube-api-access-d6qt5\") pod \"ovn-controller-5gpxg\" (UID: \"af59b739-6a81-44bf-a1f6-2d6d3038c43f\") " pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.883985 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knd7l\" (UniqueName: \"kubernetes.io/projected/1311e160-08fd-4e7e-9599-031cdf056c62-kube-api-access-knd7l\") pod \"ovn-controller-ovs-gfvtr\" (UID: \"1311e160-08fd-4e7e-9599-031cdf056c62\") " pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:42 crc kubenswrapper[4943]: I1129 07:04:42.984767 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5gpxg" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.023472 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-gfvtr" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.543315 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.544694 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.549598 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.551760 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-7khqt" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.551967 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.552119 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.552267 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.558939 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.676977 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.677029 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/740a8879-98ab-4937-9d34-8c8563d3c852-config\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.677106 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.677127 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.677152 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whhl6\" (UniqueName: \"kubernetes.io/projected/740a8879-98ab-4937-9d34-8c8563d3c852-kube-api-access-whhl6\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.677176 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/740a8879-98ab-4937-9d34-8c8563d3c852-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.677198 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.677247 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/740a8879-98ab-4937-9d34-8c8563d3c852-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.778861 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/740a8879-98ab-4937-9d34-8c8563d3c852-config\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.779003 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.779031 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.779069 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whhl6\" (UniqueName: \"kubernetes.io/projected/740a8879-98ab-4937-9d34-8c8563d3c852-kube-api-access-whhl6\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.779107 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/740a8879-98ab-4937-9d34-8c8563d3c852-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.779139 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.779186 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/740a8879-98ab-4937-9d34-8c8563d3c852-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.779233 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 
07:04:43.779528 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.780390 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/740a8879-98ab-4937-9d34-8c8563d3c852-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.780692 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/740a8879-98ab-4937-9d34-8c8563d3c852-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.780825 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/740a8879-98ab-4937-9d34-8c8563d3c852-config\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.783948 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.785977 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.788166 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/740a8879-98ab-4937-9d34-8c8563d3c852-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.799265 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whhl6\" (UniqueName: \"kubernetes.io/projected/740a8879-98ab-4937-9d34-8c8563d3c852-kube-api-access-whhl6\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.805722 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"740a8879-98ab-4937-9d34-8c8563d3c852\") " pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:43 crc kubenswrapper[4943]: I1129 07:04:43.873695 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.328138 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:04:44 crc kubenswrapper[4943]: E1129 07:04:44.328376 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.367520 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.369159 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.370955 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.371226 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.371550 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.372000 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-f2rrg" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.376801 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.495903 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nq6n\" (UniqueName: \"kubernetes.io/projected/d577552f-76e9-4b0f-9ab1-74aec5d11704-kube-api-access-9nq6n\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.495942 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.496119 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.496152 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d577552f-76e9-4b0f-9ab1-74aec5d11704-config\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.496219 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.496237 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.496263 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d577552f-76e9-4b0f-9ab1-74aec5d11704-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.496305 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d577552f-76e9-4b0f-9ab1-74aec5d11704-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.598743 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nq6n\" (UniqueName: \"kubernetes.io/projected/d577552f-76e9-4b0f-9ab1-74aec5d11704-kube-api-access-9nq6n\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.598800 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.598835 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.598874 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d577552f-76e9-4b0f-9ab1-74aec5d11704-config\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.598935 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.598963 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-ovsdbserver-sb-tls-certs\") pod 
\"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.598988 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d577552f-76e9-4b0f-9ab1-74aec5d11704-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.599025 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d577552f-76e9-4b0f-9ab1-74aec5d11704-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.599545 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d577552f-76e9-4b0f-9ab1-74aec5d11704-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.600002 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.601655 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d577552f-76e9-4b0f-9ab1-74aec5d11704-config\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.606752 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.607276 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.607972 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d577552f-76e9-4b0f-9ab1-74aec5d11704-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.608781 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d577552f-76e9-4b0f-9ab1-74aec5d11704-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.617106 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nq6n\" (UniqueName: 
\"kubernetes.io/projected/d577552f-76e9-4b0f-9ab1-74aec5d11704-kube-api-access-9nq6n\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.621999 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"d577552f-76e9-4b0f-9ab1-74aec5d11704\") " pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:44 crc kubenswrapper[4943]: I1129 07:04:44.691677 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 29 07:04:57 crc kubenswrapper[4943]: I1129 07:04:57.327431 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:04:57 crc kubenswrapper[4943]: E1129 07:04:57.328665 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:05:04 crc kubenswrapper[4943]: E1129 07:05:04.600835 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 29 07:05:04 crc kubenswrapper[4943]: E1129 07:05:04.601611 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
Nov 29 07:05:04 crc kubenswrapper[4943]: E1129 07:05:04.601611 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hvgnj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-8tlj7_openstack(9906bc4a-bff0-4622-b0de-79a0e03dda90): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:05:04 crc kubenswrapper[4943]: E1129 07:05:04.602898 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" podUID="9906bc4a-bff0-4622-b0de-79a0e03dda90"
Nov 29 07:05:04 crc kubenswrapper[4943]: I1129 07:05:04.895818 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 29 07:05:04 crc kubenswrapper[4943]: I1129 07:05:04.911554 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 29 07:05:04 crc kubenswrapper[4943]: I1129 07:05:04.922952 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5gpxg"]
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.018527 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.056510 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.082200 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 29 07:05:05 crc kubenswrapper[4943]: W1129 07:05:05.088519 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2be496c_a331_4baf_b42b_453be5225812.slice/crio-8fef65242d7053c1f6d01a8f0cb55ca16bac314196b114a85ea430d496e90f78 WatchSource:0}: Error finding container 8fef65242d7053c1f6d01a8f0cb55ca16bac314196b114a85ea430d496e90f78: Status 404 returned error can't find the container with id 8fef65242d7053c1f6d01a8f0cb55ca16bac314196b114a85ea430d496e90f78
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.088752 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.148706 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 29 07:05:05 crc kubenswrapper[4943]: W1129 07:05:05.155036 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod740a8879_98ab_4937_9d34_8c8563d3c852.slice/crio-5cc9a97773b6698cef886706b25316586df3e72d455dac81e62a97c821bbb36f WatchSource:0}: Error finding container 5cc9a97773b6698cef886706b25316586df3e72d455dac81e62a97c821bbb36f: Status 404 returned error can't find the container with id 5cc9a97773b6698cef886706b25316586df3e72d455dac81e62a97c821bbb36f
Nov 29 07:05:05 crc kubenswrapper[4943]: W1129 07:05:05.214457 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1311e160_08fd_4e7e_9599_031cdf056c62.slice/crio-cc91bcd8ffa6d7c541445635d3ec8936558105407a0e2859c0f3137aa35c4b87 WatchSource:0}: Error finding container cc91bcd8ffa6d7c541445635d3ec8936558105407a0e2859c0f3137aa35c4b87: Status 404 returned error can't find the container with id cc91bcd8ffa6d7c541445635d3ec8936558105407a0e2859c0f3137aa35c4b87
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.217162 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-gfvtr"]
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.433935 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"57b1dafb-1dfa-4f23-8335-50600bc5becb","Type":"ContainerStarted","Data":"905e23fbc618427a07fa1a0c54f957d8b4bd3980544c0cacfbe6eae7354dab77"}
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.435871 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"740a8879-98ab-4937-9d34-8c8563d3c852","Type":"ContainerStarted","Data":"5cc9a97773b6698cef886706b25316586df3e72d455dac81e62a97c821bbb36f"}
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.437604 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg" event={"ID":"af59b739-6a81-44bf-a1f6-2d6d3038c43f","Type":"ContainerStarted","Data":"1de0c78930b9c2e6ea1bb2765fd722d24f6e3d6135b03bfd870b36bbb53ec0ec"}
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.438934 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263","Type":"ContainerStarted","Data":"5b7c7b01abab4561ef0567d9081d8a6b872b084ed50aa550222e49356fa4ff59"}
Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.441507 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a2be496c-a331-4baf-b42b-453be5225812","Type":"ContainerStarted","Data":"8fef65242d7053c1f6d01a8f0cb55ca16bac314196b114a85ea430d496e90f78"}
event={"ID":"849c4c54-e078-43b4-8137-afe141df50cf","Type":"ContainerStarted","Data":"8017a9c2c25e5b1cfef7922fe25d68917f096a9a6ea14331cda7e5233f5baf20"} Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.450535 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gfvtr" event={"ID":"1311e160-08fd-4e7e-9599-031cdf056c62","Type":"ContainerStarted","Data":"cc91bcd8ffa6d7c541445635d3ec8936558105407a0e2859c0f3137aa35c4b87"} Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.453128 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5783e23b-47b8-4bbe-99aa-29271dc74d51","Type":"ContainerStarted","Data":"aaab6ce2ab4f44b00bfef6837d6fcbff565283e4b01c6d9bce80aaa1b439d123"} Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.460427 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a756fe5f-1037-4ec3-b91a-fdce5d723f04","Type":"ContainerStarted","Data":"6b4d992ac3780d8543b4d9de8e52bafc0aad936f783d033ed928b1fbdb0c61a1"} Nov 29 07:05:05 crc kubenswrapper[4943]: I1129 07:05:05.756948 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 29 07:05:05 crc kubenswrapper[4943]: E1129 07:05:05.847285 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 29 07:05:05 crc kubenswrapper[4943]: E1129 07:05:05.847495 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
Nov 29 07:05:05 crc kubenswrapper[4943]: E1129 07:05:05.847495 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mhl8w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-7gzb2_openstack(441e3d11-b7c0-4c0b-9db7-18eeb2782242): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:05:05 crc kubenswrapper[4943]: E1129 07:05:05.849128 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" podUID="441e3d11-b7c0-4c0b-9db7-18eeb2782242"
Nov 29 07:05:06 crc kubenswrapper[4943]: W1129 07:05:06.062002 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd577552f_76e9_4b0f_9ab1_74aec5d11704.slice/crio-1584c5117c544b6e3857b79692efbafe712827b1879100a70ecab8c33b5730dd WatchSource:0}: Error finding container 1584c5117c544b6e3857b79692efbafe712827b1879100a70ecab8c33b5730dd: Status 404 returned error can't find the container with id 1584c5117c544b6e3857b79692efbafe712827b1879100a70ecab8c33b5730dd
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.273482 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-config\") pod \"9906bc4a-bff0-4622-b0de-79a0e03dda90\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.273661 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-dns-svc\") pod \"9906bc4a-bff0-4622-b0de-79a0e03dda90\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.273910 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvgnj\" (UniqueName: \"kubernetes.io/projected/9906bc4a-bff0-4622-b0de-79a0e03dda90-kube-api-access-hvgnj\") pod \"9906bc4a-bff0-4622-b0de-79a0e03dda90\" (UID: \"9906bc4a-bff0-4622-b0de-79a0e03dda90\") " Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.274076 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-config" (OuterVolumeSpecName: "config") pod "9906bc4a-bff0-4622-b0de-79a0e03dda90" (UID: "9906bc4a-bff0-4622-b0de-79a0e03dda90"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.274358 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9906bc4a-bff0-4622-b0de-79a0e03dda90" (UID: "9906bc4a-bff0-4622-b0de-79a0e03dda90"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.278487 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9906bc4a-bff0-4622-b0de-79a0e03dda90-kube-api-access-hvgnj" (OuterVolumeSpecName: "kube-api-access-hvgnj") pod "9906bc4a-bff0-4622-b0de-79a0e03dda90" (UID: "9906bc4a-bff0-4622-b0de-79a0e03dda90"). InnerVolumeSpecName "kube-api-access-hvgnj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.376099 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvgnj\" (UniqueName: \"kubernetes.io/projected/9906bc4a-bff0-4622-b0de-79a0e03dda90-kube-api-access-hvgnj\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.376146 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.376158 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9906bc4a-bff0-4622-b0de-79a0e03dda90-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.471405 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" event={"ID":"9906bc4a-bff0-4622-b0de-79a0e03dda90","Type":"ContainerDied","Data":"e4ed43579cb576b3db0494b4e16a03664302b83b820a44ddf8c7afe3ccd21b32"} Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.471432 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-8tlj7" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.473161 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"d577552f-76e9-4b0f-9ab1-74aec5d11704","Type":"ContainerStarted","Data":"1584c5117c544b6e3857b79692efbafe712827b1879100a70ecab8c33b5730dd"} Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.556137 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8tlj7"] Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.564106 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8tlj7"] Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.780680 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.884300 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhl8w\" (UniqueName: \"kubernetes.io/projected/441e3d11-b7c0-4c0b-9db7-18eeb2782242-kube-api-access-mhl8w\") pod \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\" (UID: \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\") " Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.884386 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/441e3d11-b7c0-4c0b-9db7-18eeb2782242-config\") pod \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\" (UID: \"441e3d11-b7c0-4c0b-9db7-18eeb2782242\") " Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.885170 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/441e3d11-b7c0-4c0b-9db7-18eeb2782242-config" (OuterVolumeSpecName: "config") pod "441e3d11-b7c0-4c0b-9db7-18eeb2782242" (UID: "441e3d11-b7c0-4c0b-9db7-18eeb2782242"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.888882 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/441e3d11-b7c0-4c0b-9db7-18eeb2782242-kube-api-access-mhl8w" (OuterVolumeSpecName: "kube-api-access-mhl8w") pod "441e3d11-b7c0-4c0b-9db7-18eeb2782242" (UID: "441e3d11-b7c0-4c0b-9db7-18eeb2782242"). InnerVolumeSpecName "kube-api-access-mhl8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.986328 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhl8w\" (UniqueName: \"kubernetes.io/projected/441e3d11-b7c0-4c0b-9db7-18eeb2782242-kube-api-access-mhl8w\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:06 crc kubenswrapper[4943]: I1129 07:05:06.986357 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/441e3d11-b7c0-4c0b-9db7-18eeb2782242-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:07 crc kubenswrapper[4943]: I1129 07:05:07.339458 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9906bc4a-bff0-4622-b0de-79a0e03dda90" path="/var/lib/kubelet/pods/9906bc4a-bff0-4622-b0de-79a0e03dda90/volumes" Nov 29 07:05:07 crc kubenswrapper[4943]: I1129 07:05:07.483327 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" event={"ID":"441e3d11-b7c0-4c0b-9db7-18eeb2782242","Type":"ContainerDied","Data":"cd85c804bb05b035a00ab18d7f2e6b529d62f09c189db132988a0b3f98f227c6"} Nov 29 07:05:07 crc kubenswrapper[4943]: I1129 07:05:07.483441 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7gzb2" Nov 29 07:05:07 crc kubenswrapper[4943]: I1129 07:05:07.487388 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" event={"ID":"6a278192-6447-4d10-93f2-907a904d36dc","Type":"ContainerStarted","Data":"588f05a1653422c6b33d96b1a1f5b7aea5c795ea26633df146b9f48f25bce929"} Nov 29 07:05:07 crc kubenswrapper[4943]: I1129 07:05:07.490557 4943 generic.go:334] "Generic (PLEG): container finished" podID="749a7a01-9c52-4138-84e2-90d383b8faee" containerID="15fab7c5c1f129a75671b60175f9c3ed19706578d3e55b29d70fbad5ed0893b2" exitCode=0 Nov 29 07:05:07 crc kubenswrapper[4943]: I1129 07:05:07.490626 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" event={"ID":"749a7a01-9c52-4138-84e2-90d383b8faee","Type":"ContainerDied","Data":"15fab7c5c1f129a75671b60175f9c3ed19706578d3e55b29d70fbad5ed0893b2"} Nov 29 07:05:07 crc kubenswrapper[4943]: I1129 07:05:07.620777 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7gzb2"] Nov 29 07:05:07 crc kubenswrapper[4943]: I1129 07:05:07.626598 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7gzb2"] Nov 29 07:05:08 crc kubenswrapper[4943]: I1129 07:05:08.501273 4943 generic.go:334] "Generic (PLEG): container finished" podID="6a278192-6447-4d10-93f2-907a904d36dc" containerID="588f05a1653422c6b33d96b1a1f5b7aea5c795ea26633df146b9f48f25bce929" exitCode=0 Nov 29 07:05:08 crc kubenswrapper[4943]: I1129 07:05:08.501372 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" 
event={"ID":"6a278192-6447-4d10-93f2-907a904d36dc","Type":"ContainerDied","Data":"588f05a1653422c6b33d96b1a1f5b7aea5c795ea26633df146b9f48f25bce929"} Nov 29 07:05:08 crc kubenswrapper[4943]: I1129 07:05:08.507547 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" event={"ID":"749a7a01-9c52-4138-84e2-90d383b8faee","Type":"ContainerStarted","Data":"a0b12c4df4f80463e6d982549171292a595952d926c8bb88c7d40dfa15eb375a"} Nov 29 07:05:08 crc kubenswrapper[4943]: I1129 07:05:08.507985 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:05:08 crc kubenswrapper[4943]: I1129 07:05:08.544799 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" podStartSLOduration=8.706454302000001 podStartE2EDuration="38.544777666s" podCreationTimestamp="2025-11-29 07:04:30 +0000 UTC" firstStartedPulling="2025-11-29 07:04:36.293894625 +0000 UTC m=+1851.223983378" lastFinishedPulling="2025-11-29 07:05:06.132217989 +0000 UTC m=+1881.062306742" observedRunningTime="2025-11-29 07:05:08.537594629 +0000 UTC m=+1883.467683382" watchObservedRunningTime="2025-11-29 07:05:08.544777666 +0000 UTC m=+1883.474866419" Nov 29 07:05:09 crc kubenswrapper[4943]: I1129 07:05:09.339002 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="441e3d11-b7c0-4c0b-9db7-18eeb2782242" path="/var/lib/kubelet/pods/441e3d11-b7c0-4c0b-9db7-18eeb2782242/volumes" Nov 29 07:05:10 crc kubenswrapper[4943]: I1129 07:05:10.327509 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:05:10 crc kubenswrapper[4943]: E1129 07:05:10.328259 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:05:16 crc kubenswrapper[4943]: I1129 07:05:16.354737 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:05:18 crc kubenswrapper[4943]: E1129 07:05:18.660507 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 29 07:05:18 crc kubenswrapper[4943]: E1129 07:05:18.660950 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
Nov 29 07:05:18 crc kubenswrapper[4943]: E1129 07:05:18.660950 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2jkk8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(57b1dafb-1dfa-4f23-8335-50600bc5becb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:05:18 crc kubenswrapper[4943]: E1129 07:05:18.662301 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="57b1dafb-1dfa-4f23-8335-50600bc5becb"
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.592078 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-k47vk"]
Need to start a new one" pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.601202 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.615429 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-k47vk"] Nov 29 07:05:19 crc kubenswrapper[4943]: E1129 07:05:19.639113 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="57b1dafb-1dfa-4f23-8335-50600bc5becb" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.749363 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-86p65"] Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.780624 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-f7ghw"] Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.781946 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.784687 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.795174 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2736d08a-dfca-4da2-bae5-7917c31200c1-ovn-rundir\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.795250 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2736d08a-dfca-4da2-bae5-7917c31200c1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.795272 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2736d08a-dfca-4da2-bae5-7917c31200c1-combined-ca-bundle\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.795335 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2736d08a-dfca-4da2-bae5-7917c31200c1-ovs-rundir\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.795352 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g98lr\" (UniqueName: \"kubernetes.io/projected/2736d08a-dfca-4da2-bae5-7917c31200c1-kube-api-access-g98lr\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc 
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.795391 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2736d08a-dfca-4da2-bae5-7917c31200c1-config\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk"
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.796205 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-f7ghw"]
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897042 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw"
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897124 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw"
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897169 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2736d08a-dfca-4da2-bae5-7917c31200c1-ovn-rundir\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk"
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897216 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2736d08a-dfca-4da2-bae5-7917c31200c1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk"
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897233 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2736d08a-dfca-4da2-bae5-7917c31200c1-combined-ca-bundle\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk"
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897261 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-config\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw"
Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897308 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2736d08a-dfca-4da2-bae5-7917c31200c1-ovs-rundir\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk"
\"kubernetes.io/projected/2736d08a-dfca-4da2-bae5-7917c31200c1-kube-api-access-g98lr\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897357 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvhb6\" (UniqueName: \"kubernetes.io/projected/164e7864-4da7-49ef-88b6-d42087865d1b-kube-api-access-lvhb6\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897410 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2736d08a-dfca-4da2-bae5-7917c31200c1-config\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.897546 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2736d08a-dfca-4da2-bae5-7917c31200c1-ovn-rundir\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.898058 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2736d08a-dfca-4da2-bae5-7917c31200c1-config\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.898154 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2736d08a-dfca-4da2-bae5-7917c31200c1-ovs-rundir\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.908630 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2736d08a-dfca-4da2-bae5-7917c31200c1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.915793 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2736d08a-dfca-4da2-bae5-7917c31200c1-combined-ca-bundle\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.931513 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g98lr\" (UniqueName: \"kubernetes.io/projected/2736d08a-dfca-4da2-bae5-7917c31200c1-kube-api-access-g98lr\") pod \"ovn-controller-metrics-k47vk\" (UID: \"2736d08a-dfca-4da2-bae5-7917c31200c1\") " pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.999277 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-config\") pod 
\"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.999375 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvhb6\" (UniqueName: \"kubernetes.io/projected/164e7864-4da7-49ef-88b6-d42087865d1b-kube-api-access-lvhb6\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.999420 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:19 crc kubenswrapper[4943]: I1129 07:05:19.999443 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.000307 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.000980 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-config\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.001658 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.015813 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-f7ghw"] Nov 29 07:05:20 crc kubenswrapper[4943]: E1129 07:05:20.016354 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-lvhb6], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" podUID="164e7864-4da7-49ef-88b6-d42087865d1b" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.042516 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvhb6\" (UniqueName: \"kubernetes.io/projected/164e7864-4da7-49ef-88b6-d42087865d1b-kube-api-access-lvhb6\") pod \"dnsmasq-dns-7f896c8c65-f7ghw\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.043091 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hbfzw"] Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.044646 4943 util.go:30] "No sandbox for 
Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.044646 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw"
Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.051660 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.086912 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hbfzw"]
Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.204174 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-config\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw"
Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.204249 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw"
Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.204302 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw"
Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.204347 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw"
Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.204418 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmlvb\" (UniqueName: \"kubernetes.io/projected/67d225ea-2f1a-4270-8668-6a6001bd7c8c-kube-api-access-cmlvb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw"
Need to start a new one" pod="openstack/ovn-controller-metrics-k47vk" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.306774 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-config\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.306887 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.306938 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.306969 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.307033 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmlvb\" (UniqueName: \"kubernetes.io/projected/67d225ea-2f1a-4270-8668-6a6001bd7c8c-kube-api-access-cmlvb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.308055 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-config\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.308106 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.308076 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.308401 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 
07:05:20.326439 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmlvb\" (UniqueName: \"kubernetes.io/projected/67d225ea-2f1a-4270-8668-6a6001bd7c8c-kube-api-access-cmlvb\") pod \"dnsmasq-dns-86db49b7ff-hbfzw\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.373106 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.642713 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.659918 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.822107 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-dns-svc\") pod \"164e7864-4da7-49ef-88b6-d42087865d1b\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.822607 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "164e7864-4da7-49ef-88b6-d42087865d1b" (UID: "164e7864-4da7-49ef-88b6-d42087865d1b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.822679 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvhb6\" (UniqueName: \"kubernetes.io/projected/164e7864-4da7-49ef-88b6-d42087865d1b-kube-api-access-lvhb6\") pod \"164e7864-4da7-49ef-88b6-d42087865d1b\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.822731 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-ovsdbserver-sb\") pod \"164e7864-4da7-49ef-88b6-d42087865d1b\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.822767 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-config\") pod \"164e7864-4da7-49ef-88b6-d42087865d1b\" (UID: \"164e7864-4da7-49ef-88b6-d42087865d1b\") " Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.823147 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.823324 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "164e7864-4da7-49ef-88b6-d42087865d1b" (UID: "164e7864-4da7-49ef-88b6-d42087865d1b"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.823512 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-config" (OuterVolumeSpecName: "config") pod "164e7864-4da7-49ef-88b6-d42087865d1b" (UID: "164e7864-4da7-49ef-88b6-d42087865d1b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.827381 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/164e7864-4da7-49ef-88b6-d42087865d1b-kube-api-access-lvhb6" (OuterVolumeSpecName: "kube-api-access-lvhb6") pod "164e7864-4da7-49ef-88b6-d42087865d1b" (UID: "164e7864-4da7-49ef-88b6-d42087865d1b"). InnerVolumeSpecName "kube-api-access-lvhb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.924044 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvhb6\" (UniqueName: \"kubernetes.io/projected/164e7864-4da7-49ef-88b6-d42087865d1b-kube-api-access-lvhb6\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.924084 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:20 crc kubenswrapper[4943]: I1129 07:05:20.924100 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/164e7864-4da7-49ef-88b6-d42087865d1b-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:05:21 crc kubenswrapper[4943]: E1129 07:05:21.384348 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 29 07:05:21 crc kubenswrapper[4943]: E1129 07:05:21.384885 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-clkpj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(5783e23b-47b8-4bbe-99aa-29271dc74d51): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:05:21 crc kubenswrapper[4943]: E1129 07:05:21.386331 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" Nov 29 07:05:21 crc kubenswrapper[4943]: I1129 07:05:21.649953 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-f7ghw" Nov 29 07:05:21 crc kubenswrapper[4943]: I1129 07:05:21.712296 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-f7ghw"] Nov 29 07:05:21 crc kubenswrapper[4943]: I1129 07:05:21.720495 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-f7ghw"] Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.197615 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" Nov 29 07:05:22 crc kubenswrapper[4943]: I1129 07:05:22.328075 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.328708 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.841770 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.842019 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- 
/usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n654hfbh568hd4h65fh684h569h584h58h75h5cbh664h9fh685h55bh79h695hf4hfh86h589h5bfh5f8hb5h85h58bh685h5d5h89hdbhfh59cq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wfrtq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(a2be496c-a331-4baf-b42b-453be5225812): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.843912 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="a2be496c-a331-4baf-b42b-453be5225812" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.859173 4943 log.go:32] "PullImage 
from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.859466 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:ovsdb-server-init,Image:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd6h595h5c5h8bhd5h55ch597h5b7h596h5c6hc4h594h5cch657h64dh5c7h85h648h549h685h5ch64ch556h5b9h58fhbch595h67bh88hcdh545h67dq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-knd7l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-gfvtr_openstack(1311e160-08fd-4e7e-9599-031cdf056c62): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.860858 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ovs-gfvtr" podUID="1311e160-08fd-4e7e-9599-031cdf056c62" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.879676 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.879882 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 
/var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7x9v2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(18c9e0d2-f13e-4af5-9f57-22f3f4c6b263): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:05:22 crc kubenswrapper[4943]: E1129 07:05:22.881358 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" Nov 29 07:05:23 crc kubenswrapper[4943]: I1129 07:05:23.338947 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="164e7864-4da7-49ef-88b6-d42087865d1b" path="/var/lib/kubelet/pods/164e7864-4da7-49ef-88b6-d42087865d1b/volumes" Nov 29 07:05:23 crc kubenswrapper[4943]: E1129 07:05:23.665368 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" 
podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" Nov 29 07:05:23 crc kubenswrapper[4943]: E1129 07:05:23.666610 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\\\"\"" pod="openstack/ovn-controller-ovs-gfvtr" podUID="1311e160-08fd-4e7e-9599-031cdf056c62" Nov 29 07:05:23 crc kubenswrapper[4943]: E1129 07:05:23.666650 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="a2be496c-a331-4baf-b42b-453be5225812" Nov 29 07:05:24 crc kubenswrapper[4943]: I1129 07:05:24.331619 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-k47vk"] Nov 29 07:05:24 crc kubenswrapper[4943]: I1129 07:05:24.578404 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hbfzw"] Nov 29 07:05:25 crc kubenswrapper[4943]: I1129 07:05:25.682589 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" event={"ID":"67d225ea-2f1a-4270-8668-6a6001bd7c8c","Type":"ContainerStarted","Data":"341abfff15d9ccaa1fad314896acd2ef8eac5bbeb06ccfcc41f15d706076b69d"} Nov 29 07:05:25 crc kubenswrapper[4943]: I1129 07:05:25.683953 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-k47vk" event={"ID":"2736d08a-dfca-4da2-bae5-7917c31200c1","Type":"ContainerStarted","Data":"40db3eea7e77e76eab00e0f675015e88959f33515516b5b59f90bd4dbb822b4a"} Nov 29 07:05:35 crc kubenswrapper[4943]: I1129 07:05:35.332363 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:05:35 crc kubenswrapper[4943]: E1129 07:05:35.333173 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:05:43 crc kubenswrapper[4943]: I1129 07:05:43.821986 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" event={"ID":"6a278192-6447-4d10-93f2-907a904d36dc","Type":"ContainerStarted","Data":"c78192575842cf741abace39f6329e17412bac880f9f68c99f8c40f4ebf86c8c"} Nov 29 07:05:44 crc kubenswrapper[4943]: I1129 07:05:44.828993 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns" containerID="cri-o://c78192575842cf741abace39f6329e17412bac880f9f68c99f8c40f4ebf86c8c" gracePeriod=10 Nov 29 07:05:44 crc kubenswrapper[4943]: I1129 07:05:44.829318 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:05:44 crc kubenswrapper[4943]: I1129 07:05:44.849459 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" podStartSLOduration=42.936787459 podStartE2EDuration="1m13.849439752s" 
podCreationTimestamp="2025-11-29 07:04:31 +0000 UTC" firstStartedPulling="2025-11-29 07:04:36.293852574 +0000 UTC m=+1851.223941327" lastFinishedPulling="2025-11-29 07:05:07.206504867 +0000 UTC m=+1882.136593620" observedRunningTime="2025-11-29 07:05:44.84893374 +0000 UTC m=+1919.779022503" watchObservedRunningTime="2025-11-29 07:05:44.849439752 +0000 UTC m=+1919.779528505" Nov 29 07:05:45 crc kubenswrapper[4943]: I1129 07:05:45.838392 4943 generic.go:334] "Generic (PLEG): container finished" podID="6a278192-6447-4d10-93f2-907a904d36dc" containerID="c78192575842cf741abace39f6329e17412bac880f9f68c99f8c40f4ebf86c8c" exitCode=0 Nov 29 07:05:45 crc kubenswrapper[4943]: I1129 07:05:45.838588 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" event={"ID":"6a278192-6447-4d10-93f2-907a904d36dc","Type":"ContainerDied","Data":"c78192575842cf741abace39f6329e17412bac880f9f68c99f8c40f4ebf86c8c"} Nov 29 07:05:47 crc kubenswrapper[4943]: E1129 07:05:47.179124 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 29 07:05:47 crc kubenswrapper[4943]: E1129 07:05:47.179624 4943 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 29 07:05:47 crc kubenswrapper[4943]: E1129 07:05:47.180992 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-trmfj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(849c4c54-e078-43b4-8137-afe141df50cf): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 07:05:47 crc kubenswrapper[4943]: E1129 07:05:47.184683 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="849c4c54-e078-43b4-8137-afe141df50cf" Nov 29 07:05:48 crc kubenswrapper[4943]: I1129 07:05:48.328036 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:05:48 crc kubenswrapper[4943]: E1129 07:05:48.329674 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:05:49 crc kubenswrapper[4943]: E1129 07:05:49.997530 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="849c4c54-e078-43b4-8137-afe141df50cf" Nov 29 07:05:56 crc kubenswrapper[4943]: I1129 07:05:56.707384 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.102:5353: i/o timeout" Nov 29 07:06:00 crc kubenswrapper[4943]: I1129 07:06:00.327546 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:06:00 crc kubenswrapper[4943]: E1129 07:06:00.327829 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:06:01 crc 
kubenswrapper[4943]: I1129 07:06:01.707956 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.102:5353: i/o timeout" Nov 29 07:06:06 crc kubenswrapper[4943]: I1129 07:06:06.708219 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.102:5353: i/o timeout" Nov 29 07:06:11 crc kubenswrapper[4943]: I1129 07:06:11.709777 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.102:5353: i/o timeout" Nov 29 07:06:15 crc kubenswrapper[4943]: I1129 07:06:15.333742 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:06:15 crc kubenswrapper[4943]: E1129 07:06:15.334200 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:06:16 crc kubenswrapper[4943]: I1129 07:06:16.711285 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.102:5353: i/o timeout" Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.469979 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.667257 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kc22s\" (UniqueName: \"kubernetes.io/projected/6a278192-6447-4d10-93f2-907a904d36dc-kube-api-access-kc22s\") pod \"6a278192-6447-4d10-93f2-907a904d36dc\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.667412 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-config\") pod \"6a278192-6447-4d10-93f2-907a904d36dc\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.667450 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-dns-svc\") pod \"6a278192-6447-4d10-93f2-907a904d36dc\" (UID: \"6a278192-6447-4d10-93f2-907a904d36dc\") " Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.680607 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a278192-6447-4d10-93f2-907a904d36dc-kube-api-access-kc22s" (OuterVolumeSpecName: "kube-api-access-kc22s") pod "6a278192-6447-4d10-93f2-907a904d36dc" (UID: "6a278192-6447-4d10-93f2-907a904d36dc"). InnerVolumeSpecName "kube-api-access-kc22s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.708587 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-config" (OuterVolumeSpecName: "config") pod "6a278192-6447-4d10-93f2-907a904d36dc" (UID: "6a278192-6447-4d10-93f2-907a904d36dc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.710481 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6a278192-6447-4d10-93f2-907a904d36dc" (UID: "6a278192-6447-4d10-93f2-907a904d36dc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.768883 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.768930 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a278192-6447-4d10-93f2-907a904d36dc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:06:18 crc kubenswrapper[4943]: I1129 07:06:18.768944 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kc22s\" (UniqueName: \"kubernetes.io/projected/6a278192-6447-4d10-93f2-907a904d36dc-kube-api-access-kc22s\") on node \"crc\" DevicePath \"\"" Nov 29 07:06:19 crc kubenswrapper[4943]: I1129 07:06:19.081249 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" event={"ID":"6a278192-6447-4d10-93f2-907a904d36dc","Type":"ContainerDied","Data":"af1dd5735ad8b248e702cd833f3815904a0d1572d361d3398a840e9a3ab48502"} Nov 29 07:06:19 crc kubenswrapper[4943]: I1129 07:06:19.081301 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" Nov 29 07:06:19 crc kubenswrapper[4943]: I1129 07:06:19.081320 4943 scope.go:117] "RemoveContainer" containerID="c78192575842cf741abace39f6329e17412bac880f9f68c99f8c40f4ebf86c8c" Nov 29 07:06:19 crc kubenswrapper[4943]: I1129 07:06:19.122003 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-86p65"] Nov 29 07:06:19 crc kubenswrapper[4943]: I1129 07:06:19.131122 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-86p65"] Nov 29 07:06:19 crc kubenswrapper[4943]: I1129 07:06:19.339067 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a278192-6447-4d10-93f2-907a904d36dc" path="/var/lib/kubelet/pods/6a278192-6447-4d10-93f2-907a904d36dc/volumes" Nov 29 07:06:21 crc kubenswrapper[4943]: I1129 07:06:21.711917 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-86p65" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.102:5353: i/o timeout" Nov 29 07:06:25 crc kubenswrapper[4943]: I1129 07:06:25.138303 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"d577552f-76e9-4b0f-9ab1-74aec5d11704","Type":"ContainerStarted","Data":"5ddee04d64c9df5bab0f87f54ac2ce04b3a8dd5e7e0f977a295c7d32da72b1db"} Nov 29 07:06:25 crc kubenswrapper[4943]: I1129 07:06:25.140203 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a756fe5f-1037-4ec3-b91a-fdce5d723f04","Type":"ContainerStarted","Data":"debc737beb2a2c3891a93c8c984163fa09785dd1d54eb1b86589fcccc5032fd2"} Nov 29 07:06:29 crc kubenswrapper[4943]: I1129 07:06:29.661274 4943 scope.go:117] "RemoveContainer" containerID="588f05a1653422c6b33d96b1a1f5b7aea5c795ea26633df146b9f48f25bce929" Nov 29 07:06:30 crc kubenswrapper[4943]: I1129 07:06:30.328439 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:06:30 crc kubenswrapper[4943]: E1129 07:06:30.329307 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:06:32 crc kubenswrapper[4943]: I1129 07:06:32.203745 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg" event={"ID":"af59b739-6a81-44bf-a1f6-2d6d3038c43f","Type":"ContainerStarted","Data":"c03c8050f083b8004a65cef2ddb51cdd09c68a5c0a0793acb0c4048db5db3b44"} Nov 29 07:06:32 crc kubenswrapper[4943]: I1129 07:06:32.204231 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-5gpxg" Nov 29 07:06:32 crc kubenswrapper[4943]: I1129 07:06:32.227237 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-5gpxg" podStartSLOduration=91.249305756 podStartE2EDuration="1m50.227220744s" podCreationTimestamp="2025-11-29 07:04:42 +0000 UTC" firstStartedPulling="2025-11-29 07:05:04.973806173 +0000 UTC m=+1879.903894926" lastFinishedPulling="2025-11-29 07:05:23.951721151 +0000 UTC m=+1898.881809914" 
observedRunningTime="2025-11-29 07:06:32.220358784 +0000 UTC m=+1967.150447547" watchObservedRunningTime="2025-11-29 07:06:32.227220744 +0000 UTC m=+1967.157309497" Nov 29 07:06:32 crc kubenswrapper[4943]: E1129 07:06:32.710309 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage4100073337/3\": happened during read: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Nov 29 07:06:32 crc kubenswrapper[4943]: E1129 07:06:32.710489 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n5cdh659hd9h95h66fh576h84h8h9ch5c5h5h678h559h98h556h5f5h68fh7fh656h87h55h684h5c4h78h6bhb9h665hc5h549h547h5c6h646q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovs-rundir,ReadOnly:true,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-rundir,ReadOnly:true,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g98lr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-metrics-k47vk_openstack(2736d08a-dfca-4da2-bae5-7917c31200c1): ErrImagePull: rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage4100073337/3\": happened during read: context canceled" logger="UnhandledError" Nov 29 07:06:32 crc 
kubenswrapper[4943]: E1129 07:06:32.712400 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = writing blob: storing blob to file \\\"/var/tmp/container_images_storage4100073337/3\\\": happened during read: context canceled\"" pod="openstack/ovn-controller-metrics-k47vk" podUID="2736d08a-dfca-4da2-bae5-7917c31200c1" Nov 29 07:06:33 crc kubenswrapper[4943]: I1129 07:06:33.219805 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"740a8879-98ab-4937-9d34-8c8563d3c852","Type":"ContainerStarted","Data":"f70b6a1c1d3a64a89a09ea7324f9d8ab6ca19634eed0cfa802a00bdd06bfde9e"} Nov 29 07:06:33 crc kubenswrapper[4943]: E1129 07:06:33.221285 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovn-controller-metrics-k47vk" podUID="2736d08a-dfca-4da2-bae5-7917c31200c1" Nov 29 07:06:34 crc kubenswrapper[4943]: I1129 07:06:34.227270 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" event={"ID":"67d225ea-2f1a-4270-8668-6a6001bd7c8c","Type":"ContainerStarted","Data":"2cffdf91f2e436cecc6bc18b94af5b4e5dcab345f341000e6050435f1c28e2bf"} Nov 29 07:06:34 crc kubenswrapper[4943]: I1129 07:06:34.229190 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gfvtr" event={"ID":"1311e160-08fd-4e7e-9599-031cdf056c62","Type":"ContainerStarted","Data":"fcf5a16ea8e20988c4344104101360ab74030ab4c95d4d57e819961c2096ec15"} Nov 29 07:06:34 crc kubenswrapper[4943]: I1129 07:06:34.230801 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5783e23b-47b8-4bbe-99aa-29271dc74d51","Type":"ContainerStarted","Data":"e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1"} Nov 29 07:06:34 crc kubenswrapper[4943]: I1129 07:06:34.233072 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263","Type":"ContainerStarted","Data":"ddd5cf83322bdfcf75f871299db3066abf8c67fc794c434dbee1ed9734efc31a"} Nov 29 07:06:34 crc kubenswrapper[4943]: I1129 07:06:34.234553 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"57b1dafb-1dfa-4f23-8335-50600bc5becb","Type":"ContainerStarted","Data":"d978eca9e16a4650664c63d58b83ae63e78a8bbb25bd6ed9e076ce7c1fee003c"} Nov 29 07:06:34 crc kubenswrapper[4943]: I1129 07:06:34.235748 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a2be496c-a331-4baf-b42b-453be5225812","Type":"ContainerStarted","Data":"754db04643b4f9d5b638ac3f57bf52d8b88155f11bdbc249c84f6b7bb3d94534"} Nov 29 07:06:35 crc kubenswrapper[4943]: I1129 07:06:35.244361 4943 generic.go:334] "Generic (PLEG): container finished" podID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerID="2cffdf91f2e436cecc6bc18b94af5b4e5dcab345f341000e6050435f1c28e2bf" exitCode=0 Nov 29 07:06:35 crc kubenswrapper[4943]: I1129 07:06:35.244538 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" 
event={"ID":"67d225ea-2f1a-4270-8668-6a6001bd7c8c","Type":"ContainerDied","Data":"2cffdf91f2e436cecc6bc18b94af5b4e5dcab345f341000e6050435f1c28e2bf"} Nov 29 07:06:35 crc kubenswrapper[4943]: I1129 07:06:35.386998 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=33.845679014 podStartE2EDuration="2m0.386978871s" podCreationTimestamp="2025-11-29 07:04:35 +0000 UTC" firstStartedPulling="2025-11-29 07:05:05.095402977 +0000 UTC m=+1880.025491730" lastFinishedPulling="2025-11-29 07:06:31.636702794 +0000 UTC m=+1966.566791587" observedRunningTime="2025-11-29 07:06:35.384415648 +0000 UTC m=+1970.314504401" watchObservedRunningTime="2025-11-29 07:06:35.386978871 +0000 UTC m=+1970.317067624" Nov 29 07:06:35 crc kubenswrapper[4943]: I1129 07:06:35.962696 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 29 07:06:37 crc kubenswrapper[4943]: I1129 07:06:37.260470 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" event={"ID":"67d225ea-2f1a-4270-8668-6a6001bd7c8c","Type":"ContainerStarted","Data":"22708a8daaa103d32550350f83eb33713ec48977461897c10ab2c1a34d1f3184"} Nov 29 07:06:37 crc kubenswrapper[4943]: I1129 07:06:37.260895 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:06:37 crc kubenswrapper[4943]: I1129 07:06:37.262882 4943 generic.go:334] "Generic (PLEG): container finished" podID="1311e160-08fd-4e7e-9599-031cdf056c62" containerID="fcf5a16ea8e20988c4344104101360ab74030ab4c95d4d57e819961c2096ec15" exitCode=0 Nov 29 07:06:37 crc kubenswrapper[4943]: I1129 07:06:37.262933 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gfvtr" event={"ID":"1311e160-08fd-4e7e-9599-031cdf056c62","Type":"ContainerDied","Data":"fcf5a16ea8e20988c4344104101360ab74030ab4c95d4d57e819961c2096ec15"} Nov 29 07:06:37 crc kubenswrapper[4943]: I1129 07:06:37.288689 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" podStartSLOduration=77.288666688 podStartE2EDuration="1m17.288666688s" podCreationTimestamp="2025-11-29 07:05:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:06:37.27660156 +0000 UTC m=+1972.206690313" watchObservedRunningTime="2025-11-29 07:06:37.288666688 +0000 UTC m=+1972.218755441" Nov 29 07:06:40 crc kubenswrapper[4943]: I1129 07:06:40.292820 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gfvtr" event={"ID":"1311e160-08fd-4e7e-9599-031cdf056c62","Type":"ContainerStarted","Data":"6a7aae4ab7643f612283d2b3c511a7a1b94589cdd84ed3d011ec8bc2dc99cedc"} Nov 29 07:06:40 crc kubenswrapper[4943]: I1129 07:06:40.963734 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 29 07:06:45 crc kubenswrapper[4943]: I1129 07:06:45.334868 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:06:45 crc kubenswrapper[4943]: E1129 07:06:45.335794 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:06:45 crc kubenswrapper[4943]: I1129 07:06:45.374763 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:06:45 crc kubenswrapper[4943]: I1129 07:06:45.430480 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tmzm2"] Nov 29 07:06:45 crc kubenswrapper[4943]: I1129 07:06:45.431205 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" containerName="dnsmasq-dns" containerID="cri-o://a0b12c4df4f80463e6d982549171292a595952d926c8bb88c7d40dfa15eb375a" gracePeriod=10 Nov 29 07:06:46 crc kubenswrapper[4943]: I1129 07:06:46.354246 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.101:5353: connect: connection refused" Nov 29 07:06:47 crc kubenswrapper[4943]: I1129 07:06:47.330089 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:06:47 crc kubenswrapper[4943]: I1129 07:06:47.354745 4943 generic.go:334] "Generic (PLEG): container finished" podID="749a7a01-9c52-4138-84e2-90d383b8faee" containerID="a0b12c4df4f80463e6d982549171292a595952d926c8bb88c7d40dfa15eb375a" exitCode=0 Nov 29 07:06:47 crc kubenswrapper[4943]: I1129 07:06:47.354814 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" event={"ID":"749a7a01-9c52-4138-84e2-90d383b8faee","Type":"ContainerDied","Data":"a0b12c4df4f80463e6d982549171292a595952d926c8bb88c7d40dfa15eb375a"} Nov 29 07:06:51 crc kubenswrapper[4943]: I1129 07:06:51.355046 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.101:5353: connect: connection refused" Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.352858 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.427184 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2" event={"ID":"749a7a01-9c52-4138-84e2-90d383b8faee","Type":"ContainerDied","Data":"b7aa59befe6aa14e2c62535d3faa2d6e4da4444511cc44a9e9db311e33de6e4d"} Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.427233 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-tmzm2"
Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.427247 4943 scope.go:117] "RemoveContainer" containerID="a0b12c4df4f80463e6d982549171292a595952d926c8bb88c7d40dfa15eb375a"
Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.436689 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jz27\" (UniqueName: \"kubernetes.io/projected/749a7a01-9c52-4138-84e2-90d383b8faee-kube-api-access-4jz27\") pod \"749a7a01-9c52-4138-84e2-90d383b8faee\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") "
Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.436871 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config\") pod \"749a7a01-9c52-4138-84e2-90d383b8faee\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") "
Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.436897 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-dns-svc\") pod \"749a7a01-9c52-4138-84e2-90d383b8faee\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") "
Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.447892 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/749a7a01-9c52-4138-84e2-90d383b8faee-kube-api-access-4jz27" (OuterVolumeSpecName: "kube-api-access-4jz27") pod "749a7a01-9c52-4138-84e2-90d383b8faee" (UID: "749a7a01-9c52-4138-84e2-90d383b8faee"). InnerVolumeSpecName "kube-api-access-4jz27". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:06:55 crc kubenswrapper[4943]: E1129 07:06:55.475290 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config podName:749a7a01-9c52-4138-84e2-90d383b8faee nodeName:}" failed. No retries permitted until 2025-11-29 07:06:55.975263357 +0000 UTC m=+1990.905352110 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config" (UniqueName: "kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config") pod "749a7a01-9c52-4138-84e2-90d383b8faee" (UID: "749a7a01-9c52-4138-84e2-90d383b8faee") : error deleting /var/lib/kubelet/pods/749a7a01-9c52-4138-84e2-90d383b8faee/volume-subpaths: remove /var/lib/kubelet/pods/749a7a01-9c52-4138-84e2-90d383b8faee/volume-subpaths: no such file or directory
Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.475708 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "749a7a01-9c52-4138-84e2-90d383b8faee" (UID: "749a7a01-9c52-4138-84e2-90d383b8faee"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.538920 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jz27\" (UniqueName: \"kubernetes.io/projected/749a7a01-9c52-4138-84e2-90d383b8faee-kube-api-access-4jz27\") on node \"crc\" DevicePath \"\""
Nov 29 07:06:55 crc kubenswrapper[4943]: I1129 07:06:55.539239 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 29 07:06:56 crc kubenswrapper[4943]: I1129 07:06:56.046534 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config\") pod \"749a7a01-9c52-4138-84e2-90d383b8faee\" (UID: \"749a7a01-9c52-4138-84e2-90d383b8faee\") "
Nov 29 07:06:56 crc kubenswrapper[4943]: I1129 07:06:56.047069 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config" (OuterVolumeSpecName: "config") pod "749a7a01-9c52-4138-84e2-90d383b8faee" (UID: "749a7a01-9c52-4138-84e2-90d383b8faee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:06:56 crc kubenswrapper[4943]: I1129 07:06:56.149385 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/749a7a01-9c52-4138-84e2-90d383b8faee-config\") on node \"crc\" DevicePath \"\""
Nov 29 07:06:56 crc kubenswrapper[4943]: I1129 07:06:56.369118 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tmzm2"]
Nov 29 07:06:56 crc kubenswrapper[4943]: I1129 07:06:56.376227 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tmzm2"]
Nov 29 07:06:57 crc kubenswrapper[4943]: I1129 07:06:57.344313 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" path="/var/lib/kubelet/pods/749a7a01-9c52-4138-84e2-90d383b8faee/volumes"
Nov 29 07:06:58 crc kubenswrapper[4943]: I1129 07:06:58.327738 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"
Nov 29 07:06:58 crc kubenswrapper[4943]: E1129 07:06:58.328267 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:07:00 crc kubenswrapper[4943]: E1129 07:07:00.305789 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified"
Nov 29 07:07:00 crc kubenswrapper[4943]: E1129 07:07:00.306021 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n586h655h75h94h596h5bbh5fh67bhfdh589h5d4h5fdh598hdfh599h686h65chb6hc8h85hdch58bh699hb8h58dh55dh576h688h6dh645h586h664q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-whhl6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(740a8879-98ab-4937-9d34-8c8563d3c852): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:07:00 crc kubenswrapper[4943]: E1129 07:07:00.307302 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="740a8879-98ab-4937-9d34-8c8563d3c852"
Nov 29 07:07:01 crc kubenswrapper[4943]: I1129 07:07:01.874733 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 29 07:07:01 crc kubenswrapper[4943]: I1129 07:07:01.915001 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 29 07:07:03 crc kubenswrapper[4943]: I1129 07:07:03.020667 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:03 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:03 crc kubenswrapper[4943]: >
Nov 29 07:07:03 crc kubenswrapper[4943]: I1129 07:07:03.874294 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 29 07:07:03 crc kubenswrapper[4943]: I1129 07:07:03.911408 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Nov 29 07:07:05 crc kubenswrapper[4943]: I1129 07:07:05.055071 4943 scope.go:117] "RemoveContainer" containerID="15fab7c5c1f129a75671b60175f9c3ed19706578d3e55b29d70fbad5ed0893b2"
Nov 29 07:07:05 crc kubenswrapper[4943]: E1129 07:07:05.055228 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="740a8879-98ab-4937-9d34-8c8563d3c852"
Nov 29 07:07:06 crc kubenswrapper[4943]: I1129 07:07:06.519423 4943 generic.go:334] "Generic (PLEG): container finished" podID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerID="e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1" exitCode=0
Nov 29 07:07:06 crc kubenswrapper[4943]: I1129 07:07:06.519589 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5783e23b-47b8-4bbe-99aa-29271dc74d51","Type":"ContainerDied","Data":"e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1"}
Nov 29 07:07:06 crc kubenswrapper[4943]: I1129 07:07:06.522432 4943 generic.go:334] "Generic (PLEG): container finished" podID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerID="ddd5cf83322bdfcf75f871299db3066abf8c67fc794c434dbee1ed9734efc31a" exitCode=0
Nov 29 07:07:06 crc kubenswrapper[4943]: I1129 07:07:06.522483 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263","Type":"ContainerDied","Data":"ddd5cf83322bdfcf75f871299db3066abf8c67fc794c434dbee1ed9734efc31a"}
Nov 29 07:07:07 crc kubenswrapper[4943]: I1129 07:07:07.539878 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"849c4c54-e078-43b4-8137-afe141df50cf","Type":"ContainerStarted","Data":"2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db"}
Nov 29 07:07:07 crc kubenswrapper[4943]: I1129 07:07:07.542689 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gfvtr" event={"ID":"1311e160-08fd-4e7e-9599-031cdf056c62","Type":"ContainerStarted","Data":"e459e10223ac1c349e0b1e8cdb302b33b5fd3c579d44e6a69defdd934b314573"}
Nov 29 07:07:07 crc kubenswrapper[4943]: I1129 07:07:07.544195 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-k47vk" event={"ID":"2736d08a-dfca-4da2-bae5-7917c31200c1","Type":"ContainerStarted","Data":"a245c033d96f78658c19ba96e60cdbacec4e8c70edcc7f1aac68a9f7461b610a"}
Nov 29 07:07:08 crc kubenswrapper[4943]: I1129 07:07:08.031911 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:08 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:08 crc kubenswrapper[4943]: >
Nov 29 07:07:08 crc kubenswrapper[4943]: I1129 07:07:08.552344 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"740a8879-98ab-4937-9d34-8c8563d3c852","Type":"ContainerStarted","Data":"5d5bdf920b248b9231019dac77d71401e1ab0f599dc622acc73619ffd93a08f6"}
Nov 29 07:07:08 crc kubenswrapper[4943]: I1129 07:07:08.554748 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5783e23b-47b8-4bbe-99aa-29271dc74d51","Type":"ContainerStarted","Data":"1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa"}
Nov 29 07:07:08 crc kubenswrapper[4943]: I1129 07:07:08.556401 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263","Type":"ContainerStarted","Data":"2088ab4e238ed6a338a2136a8502ad6d693eb3bb0a2120b43361a2075d614487"}
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.327962 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"
Nov 29 07:07:09 crc kubenswrapper[4943]: E1129 07:07:09.328289 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.563218 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.563694 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-gfvtr"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.582581 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-k47vk" podStartSLOduration=10.582334483 podStartE2EDuration="1m50.582548529s" podCreationTimestamp="2025-11-29 07:05:19 +0000 UTC" firstStartedPulling="2025-11-29 07:05:25.578249081 +0000 UTC m=+1900.508337834" lastFinishedPulling="2025-11-29 07:07:05.578463127 +0000 UTC m=+2000.508551880" observedRunningTime="2025-11-29 07:07:09.580865467 +0000 UTC m=+2004.510954220" watchObservedRunningTime="2025-11-29 07:07:09.582548529 +0000 UTC m=+2004.512637282"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.613962 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=74.086624306 podStartE2EDuration="2m38.613943996s" podCreationTimestamp="2025-11-29 07:04:31 +0000 UTC" firstStartedPulling="2025-11-29 07:05:05.097883088 +0000 UTC m=+1880.027971841" lastFinishedPulling="2025-11-29 07:06:29.625202768 +0000 UTC m=+1964.555291531" observedRunningTime="2025-11-29 07:07:09.612706106 +0000 UTC m=+2004.542794869" watchObservedRunningTime="2025-11-29 07:07:09.613943996 +0000 UTC m=+2004.544032759"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.618755 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-gfvtr"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.655042 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=72.033056844 podStartE2EDuration="2m38.655021663s" podCreationTimestamp="2025-11-29 07:04:31 +0000 UTC" firstStartedPulling="2025-11-29 07:05:05.03234933 +0000 UTC m=+1879.962438093" lastFinishedPulling="2025-11-29 07:06:31.654314159 +0000 UTC m=+1966.584402912" observedRunningTime="2025-11-29 07:07:09.647919858 +0000 UTC m=+2004.578008631" watchObservedRunningTime="2025-11-29 07:07:09.655021663 +0000 UTC m=+2004.585110416"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.672171 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=127.248252269 podStartE2EDuration="2m27.672151647s" podCreationTimestamp="2025-11-29 07:04:42 +0000 UTC" firstStartedPulling="2025-11-29 07:05:05.157681185 +0000 UTC m=+1880.087769938" lastFinishedPulling="2025-11-29 07:05:25.581580563 +0000 UTC m=+1900.511669316" observedRunningTime="2025-11-29 07:07:09.671534802 +0000 UTC m=+2004.601623575" watchObservedRunningTime="2025-11-29 07:07:09.672151647 +0000 UTC m=+2004.602240400"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.700913 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=42.334615706 podStartE2EDuration="2m32.70089827s" podCreationTimestamp="2025-11-29 07:04:37 +0000 UTC" firstStartedPulling="2025-11-29 07:05:04.922847134 +0000 UTC m=+1879.852935947" lastFinishedPulling="2025-11-29 07:06:55.289129758 +0000 UTC m=+1990.219218511" observedRunningTime="2025-11-29 07:07:09.698034269 +0000 UTC m=+2004.628123022" watchObservedRunningTime="2025-11-29 07:07:09.70089827 +0000 UTC m=+2004.630987023"
Nov 29 07:07:09 crc kubenswrapper[4943]: I1129 07:07:09.725241 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-gfvtr" podStartSLOduration=63.321160727 podStartE2EDuration="2m27.725217671s" podCreationTimestamp="2025-11-29 07:04:42 +0000 UTC" firstStartedPulling="2025-11-29 07:05:05.218745134 +0000 UTC m=+1880.148833887" lastFinishedPulling="2025-11-29 07:06:29.622802078 +0000 UTC m=+1964.552890831" observedRunningTime="2025-11-29 07:07:09.720937705 +0000 UTC m=+2004.651026458" watchObservedRunningTime="2025-11-29 07:07:09.725217671 +0000 UTC m=+2004.655306424"
Nov 29 07:07:10 crc kubenswrapper[4943]: I1129 07:07:10.569322 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-gfvtr"
Nov 29 07:07:13 crc kubenswrapper[4943]: I1129 07:07:13.018219 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:13 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:13 crc kubenswrapper[4943]: >
Nov 29 07:07:13 crc kubenswrapper[4943]: I1129 07:07:13.247630 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:07:13 crc kubenswrapper[4943]: I1129 07:07:13.269863 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 29 07:07:17 crc kubenswrapper[4943]: I1129 07:07:17.658028 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"d577552f-76e9-4b0f-9ab1-74aec5d11704","Type":"ContainerStarted","Data":"c654d9a2ae72dc28f7b41bcda8123d9c940e6cf923936a97717fb65983e213c7"}
Nov 29 07:07:17 crc kubenswrapper[4943]: I1129 07:07:17.783017 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 29 07:07:18 crc kubenswrapper[4943]: I1129 07:07:18.023170 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:18 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:18 crc kubenswrapper[4943]: >
Nov 29 07:07:18 crc kubenswrapper[4943]: I1129 07:07:18.690633 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=28.440345152 podStartE2EDuration="2m35.690613657s" podCreationTimestamp="2025-11-29 07:04:43 +0000 UTC" firstStartedPulling="2025-11-29 07:05:06.100031824 +0000 UTC m=+1881.030120577" lastFinishedPulling="2025-11-29 07:07:13.350300329 +0000 UTC m=+2008.280389082" observedRunningTime="2025-11-29 07:07:18.68506053 +0000 UTC m=+2013.615149303" watchObservedRunningTime="2025-11-29 07:07:18.690613657 +0000 UTC m=+2013.620702410"
Nov 29 07:07:19 crc kubenswrapper[4943]: I1129 07:07:19.692135 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Nov 29 07:07:20 crc kubenswrapper[4943]: I1129 07:07:20.692622 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Nov 29 07:07:20 crc kubenswrapper[4943]: I1129 07:07:20.735610 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.737469 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.920799 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 29 07:07:21 crc kubenswrapper[4943]: E1129 07:07:21.921855 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="init"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.921917 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="init"
Nov 29 07:07:21 crc kubenswrapper[4943]: E1129 07:07:21.921932 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" containerName="dnsmasq-dns"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.921938 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" containerName="dnsmasq-dns"
Nov 29 07:07:21 crc kubenswrapper[4943]: E1129 07:07:21.921951 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.921990 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns"
Nov 29 07:07:21 crc kubenswrapper[4943]: E1129 07:07:21.922003 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" containerName="init"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.922008 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" containerName="init"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.922336 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="749a7a01-9c52-4138-84e2-90d383b8faee" containerName="dnsmasq-dns"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.922361 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a278192-6447-4d10-93f2-907a904d36dc" containerName="dnsmasq-dns"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.925348 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.929929 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-bjvxg"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.930246 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.930331 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.931130 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 29 07:07:21 crc kubenswrapper[4943]: I1129 07:07:21.940275 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.060383 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.060479 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c300f9b-996e-4271-992d-932fbbb5e64f-config\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.060543 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.060866 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c300f9b-996e-4271-992d-932fbbb5e64f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.061027 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpqb8\" (UniqueName: \"kubernetes.io/projected/2c300f9b-996e-4271-992d-932fbbb5e64f-kube-api-access-dpqb8\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.061083 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c300f9b-996e-4271-992d-932fbbb5e64f-scripts\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.061112 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.162629 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.162970 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c300f9b-996e-4271-992d-932fbbb5e64f-config\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.163134 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.163234 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c300f9b-996e-4271-992d-932fbbb5e64f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.163387 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpqb8\" (UniqueName: \"kubernetes.io/projected/2c300f9b-996e-4271-992d-932fbbb5e64f-kube-api-access-dpqb8\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.163477 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c300f9b-996e-4271-992d-932fbbb5e64f-scripts\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.163557 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.164363 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c300f9b-996e-4271-992d-932fbbb5e64f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.164704 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c300f9b-996e-4271-992d-932fbbb5e64f-scripts\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.165331 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c300f9b-996e-4271-992d-932fbbb5e64f-config\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.184666 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.185028 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.185818 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpqb8\" (UniqueName: \"kubernetes.io/projected/2c300f9b-996e-4271-992d-932fbbb5e64f-kube-api-access-dpqb8\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.186971 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c300f9b-996e-4271-992d-932fbbb5e64f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2c300f9b-996e-4271-992d-932fbbb5e64f\") " pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.256067 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.327296 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15"
Nov 29 07:07:22 crc kubenswrapper[4943]: I1129 07:07:22.828236 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 29 07:07:22 crc kubenswrapper[4943]: W1129 07:07:22.832289 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c300f9b_996e_4271_992d_932fbbb5e64f.slice/crio-dc2974dfdc787786d8dd900d10c06c8e85e542f227d20042166870caadf85981 WatchSource:0}: Error finding container dc2974dfdc787786d8dd900d10c06c8e85e542f227d20042166870caadf85981: Status 404 returned error can't find the container with id dc2974dfdc787786d8dd900d10c06c8e85e542f227d20042166870caadf85981
Nov 29 07:07:23 crc kubenswrapper[4943]: I1129 07:07:23.019066 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:23 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:23 crc kubenswrapper[4943]: >
Nov 29 07:07:23 crc kubenswrapper[4943]: I1129 07:07:23.249463 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused"
Nov 29 07:07:23 crc kubenswrapper[4943]: I1129 07:07:23.271524 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.104:5671: connect: connection refused"
Nov 29 07:07:23 crc kubenswrapper[4943]: I1129 07:07:23.717606 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"74f5fd4bd09ec071509bc04dfa178bb8764e6d0b7f45e141ac761bcd13f81c65"}
Nov 29 07:07:23 crc kubenswrapper[4943]: I1129 07:07:23.735943 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2c300f9b-996e-4271-992d-932fbbb5e64f","Type":"ContainerStarted","Data":"dc2974dfdc787786d8dd900d10c06c8e85e542f227d20042166870caadf85981"}
Nov 29 07:07:28 crc kubenswrapper[4943]: I1129 07:07:28.023445 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:28 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:28 crc kubenswrapper[4943]: >
Nov 29 07:07:33 crc kubenswrapper[4943]: I1129 07:07:33.019105 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:33 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:33 crc kubenswrapper[4943]: >
Nov 29 07:07:33 crc kubenswrapper[4943]: I1129 07:07:33.248019 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused"
Nov 29 07:07:33 crc kubenswrapper[4943]: I1129 07:07:33.269851 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.104:5671: connect: connection refused"
Nov 29 07:07:33 crc kubenswrapper[4943]: E1129 07:07:33.987954 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified"
Nov 29 07:07:33 crc kubenswrapper[4943]: E1129 07:07:33.988926 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-northd,Image:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,Command:[/usr/bin/ovn-northd],Args:[-vfile:off -vconsole:info --n-threads=1 --ovnnb-db=ssl:ovsdbserver-nb-0.openstack.svc.cluster.local:6641 --ovnsb-db=ssl:ovsdbserver-sb-0.openstack.svc.cluster.local:6642 --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key --ca-cert=/etc/pki/tls/certs/ovndbca.crt],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nf5h549h68bh554h95h9bh674h55ch57h5b5h5f4h5b6h59fh648hf5h9fhd5hd9h65bh54dhb5h544h79h5dfh5b8h678h84h64fhd6h597h5c9h54cq,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:certs,Value:nf5h655h8dh87h64fh599hdbh5b9h67dh77h566h668h555h5bh56ch65h74h67ch5f4hdbh697h556h64bh664hf9h66h58h5d7h595h64dhb8h556q,ValueFrom:nil,},EnvVar{Name:certs_metrics,Value:n68dhf4h5d8h68bh67h68h59h575h66ch5b9h89h666h647h5b4hdchd8hbch77h699h66dh5b8h79h5c7h694h665hd6h645h647h5f4h9dh677h578q,ValueFrom:nil,},EnvVar{Name:ovnnorthd-config,Value:n5c8h7ch56bh8dh8hc4h5dch9dh68h6bhb7h598h549h5dbh66fh6bh5b4h5cch5d6h55ch57fhfch588h89h5ddh5d6h65bh65bh8dhc4h67dh569q,ValueFrom:nil,},EnvVar{Name:ovnnorthd-scripts,Value:n664hd8h66ch58dh64hc9h66bhd4h558h697h67bh557hdch664h567h669h555h696h556h556h5fh5bh569hbh665h9dh4h9bh564hc8h5b7h5c4q,ValueFrom:nil,},EnvVar{Name:tls-ca-bundle.pem,Value:n655hdbh58ch665h69hffh696h79h5f6hf7h66fh68chdfh5d8h97h5b7h59bh54h59bhf8hc7h5cfh547h56fhd7hf9h9fh8h54ch64fh79hb8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dpqb8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/status_check.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/status_check.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-northd-0_openstack(2c300f9b-996e-4271-992d-932fbbb5e64f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:07:34 crc kubenswrapper[4943]: E1129 07:07:34.284731 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-northd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-northd-0" podUID="2c300f9b-996e-4271-992d-932fbbb5e64f"
Nov 29 07:07:34 crc kubenswrapper[4943]: I1129 07:07:34.827118 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2c300f9b-996e-4271-992d-932fbbb5e64f","Type":"ContainerStarted","Data":"7bfff861cb6bcc646d37e91b00723b88c3a05738b541f21efcf42eec8cb70257"}
Nov 29 07:07:34 crc kubenswrapper[4943]: E1129 07:07:34.829094 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-northd\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified\\\"\"" pod="openstack/ovn-northd-0" podUID="2c300f9b-996e-4271-992d-932fbbb5e64f"
Nov 29 07:07:35 crc kubenswrapper[4943]: E1129 07:07:35.836141 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-northd\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified\\\"\"" pod="openstack/ovn-northd-0" podUID="2c300f9b-996e-4271-992d-932fbbb5e64f"
Nov 29 07:07:37 crc kubenswrapper[4943]: I1129 07:07:37.850274 4943 generic.go:334] "Generic (PLEG): container finished" podID="a756fe5f-1037-4ec3-b91a-fdce5d723f04" containerID="debc737beb2a2c3891a93c8c984163fa09785dd1d54eb1b86589fcccc5032fd2" exitCode=0
Nov 29 07:07:37 crc kubenswrapper[4943]: I1129 07:07:37.850551 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a756fe5f-1037-4ec3-b91a-fdce5d723f04","Type":"ContainerDied","Data":"debc737beb2a2c3891a93c8c984163fa09785dd1d54eb1b86589fcccc5032fd2"}
Nov 29 07:07:37 crc kubenswrapper[4943]: I1129 07:07:37.853160 4943 generic.go:334] "Generic (PLEG): container finished" podID="57b1dafb-1dfa-4f23-8335-50600bc5becb" containerID="d978eca9e16a4650664c63d58b83ae63e78a8bbb25bd6ed9e076ce7c1fee003c" exitCode=0
Nov 29 07:07:37 crc kubenswrapper[4943]: I1129 07:07:37.854371 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"57b1dafb-1dfa-4f23-8335-50600bc5becb","Type":"ContainerDied","Data":"d978eca9e16a4650664c63d58b83ae63e78a8bbb25bd6ed9e076ce7c1fee003c"}
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.027450 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:38 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:38 crc kubenswrapper[4943]: >
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.088754 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-gfvtr"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.306310 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5gpxg-config-bf47b"]
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.307525 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.309425 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.316697 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5gpxg-config-bf47b"]
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.348110 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run-ovn\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.348185 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h7lz\" (UniqueName: \"kubernetes.io/projected/294a99ab-a769-4bd3-b43b-00b300e42b85-kube-api-access-4h7lz\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.348226 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-log-ovn\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.348260 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-additional-scripts\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.348425 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.348461 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-scripts\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.450252 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-log-ovn\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.450318 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-additional-scripts\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.450441 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.450502 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-scripts\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.450595 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run-ovn\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.450664 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h7lz\" (UniqueName: \"kubernetes.io/projected/294a99ab-a769-4bd3-b43b-00b300e42b85-kube-api-access-4h7lz\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.450695 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-log-ovn\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.451159 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.451214 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run-ovn\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.451502 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-additional-scripts\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.453129 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-scripts\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.470937 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h7lz\" (UniqueName: \"kubernetes.io/projected/294a99ab-a769-4bd3-b43b-00b300e42b85-kube-api-access-4h7lz\") pod \"ovn-controller-5gpxg-config-bf47b\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") " pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.631780 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.874424 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a756fe5f-1037-4ec3-b91a-fdce5d723f04","Type":"ContainerStarted","Data":"2548944a57f3c5e5d0a27164e6caf69ab9f0c554ec399247c00d0bafda949c62"}
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.878838 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"57b1dafb-1dfa-4f23-8335-50600bc5becb","Type":"ContainerStarted","Data":"ff7aebed8ae13424b4980317b3548ea8a23d3ce6a72d66de54e1722663511fb9"}
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.916247 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=166.371644589 podStartE2EDuration="3m4.916224992s" podCreationTimestamp="2025-11-29 07:04:34 +0000 UTC" firstStartedPulling="2025-11-29 07:05:04.986831965 +0000 UTC m=+1879.916920718" lastFinishedPulling="2025-11-29 07:05:23.531412368 +0000 UTC m=+1898.461501121" observedRunningTime="2025-11-29 07:07:38.89880144 +0000 UTC m=+2033.828890193" watchObservedRunningTime="2025-11-29 07:07:38.916224992 +0000 UTC m=+2033.846313745"
Nov 29 07:07:38 crc kubenswrapper[4943]: I1129 07:07:38.933243 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371850.921558 podStartE2EDuration="3m5.933217773s" podCreationTimestamp="2025-11-29 07:04:33 +0000 UTC" firstStartedPulling="2025-11-29 07:05:05.080678433 +0000 UTC m=+1880.010767186" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:07:38.92339671 +0000 UTC m=+2033.853485483" watchObservedRunningTime="2025-11-29 07:07:38.933217773 +0000 UTC m=+2033.863306536"
Nov 29 07:07:39 crc kubenswrapper[4943]: I1129 07:07:39.088383 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5gpxg-config-bf47b"]
Nov 29 07:07:39 crc kubenswrapper[4943]: I1129 07:07:39.887673 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg-config-bf47b" event={"ID":"294a99ab-a769-4bd3-b43b-00b300e42b85","Type":"ContainerStarted","Data":"1ca94f56581e49f381a229203a2d700b466e97fdf49e7a2dceb1cd2402aba3f2"}
Nov 29 07:07:43 crc kubenswrapper[4943]: I1129 07:07:43.027406 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5gpxg" podUID="af59b739-6a81-44bf-a1f6-2d6d3038c43f" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 07:07:43 crc kubenswrapper[4943]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 07:07:43 crc kubenswrapper[4943]: >
Nov 29 07:07:43 crc kubenswrapper[4943]: I1129 07:07:43.248788 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:07:43 crc kubenswrapper[4943]: I1129 07:07:43.271812 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 29 07:07:44 crc kubenswrapper[4943]: I1129 07:07:44.400290 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 29 07:07:44 crc kubenswrapper[4943]: I1129 07:07:44.401273 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 29 07:07:45 crc kubenswrapper[4943]: I1129 07:07:45.829230 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 29 07:07:45 crc kubenswrapper[4943]: I1129 07:07:45.829591 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 29 07:07:46 crc kubenswrapper[4943]: I1129 07:07:46.945490 4943 generic.go:334] "Generic (PLEG): container finished" podID="294a99ab-a769-4bd3-b43b-00b300e42b85" containerID="07109d17b0b24581bf2bcc89af945e803454f116c73a8d96b46ba426091ada09" exitCode=0
Nov 29 07:07:46 crc kubenswrapper[4943]: I1129 07:07:46.945604 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg-config-bf47b" event={"ID":"294a99ab-a769-4bd3-b43b-00b300e42b85","Type":"ContainerDied","Data":"07109d17b0b24581bf2bcc89af945e803454f116c73a8d96b46ba426091ada09"}
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.022352 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-5gpxg"
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.260527 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404370 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run-ovn\") pod \"294a99ab-a769-4bd3-b43b-00b300e42b85\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") "
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404453 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-additional-scripts\") pod \"294a99ab-a769-4bd3-b43b-00b300e42b85\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") "
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404490 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "294a99ab-a769-4bd3-b43b-00b300e42b85" (UID: "294a99ab-a769-4bd3-b43b-00b300e42b85"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404519 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4h7lz\" (UniqueName: \"kubernetes.io/projected/294a99ab-a769-4bd3-b43b-00b300e42b85-kube-api-access-4h7lz\") pod \"294a99ab-a769-4bd3-b43b-00b300e42b85\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") "
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404596 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-scripts\") pod \"294a99ab-a769-4bd3-b43b-00b300e42b85\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") "
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404647 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-log-ovn\") pod \"294a99ab-a769-4bd3-b43b-00b300e42b85\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") "
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404708 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run\") pod \"294a99ab-a769-4bd3-b43b-00b300e42b85\" (UID: \"294a99ab-a769-4bd3-b43b-00b300e42b85\") "
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404751 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "294a99ab-a769-4bd3-b43b-00b300e42b85" (UID: "294a99ab-a769-4bd3-b43b-00b300e42b85"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.404839 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run" (OuterVolumeSpecName: "var-run") pod "294a99ab-a769-4bd3-b43b-00b300e42b85" (UID: "294a99ab-a769-4bd3-b43b-00b300e42b85"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.405203 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "294a99ab-a769-4bd3-b43b-00b300e42b85" (UID: "294a99ab-a769-4bd3-b43b-00b300e42b85"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.405463 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-scripts" (OuterVolumeSpecName: "scripts") pod "294a99ab-a769-4bd3-b43b-00b300e42b85" (UID: "294a99ab-a769-4bd3-b43b-00b300e42b85"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.406314 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.406338 4943 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.406351 4943 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run\") on node \"crc\" DevicePath \"\""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.406362 4943 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/294a99ab-a769-4bd3-b43b-00b300e42b85-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.406372 4943 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/294a99ab-a769-4bd3-b43b-00b300e42b85-additional-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.411389 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/294a99ab-a769-4bd3-b43b-00b300e42b85-kube-api-access-4h7lz" (OuterVolumeSpecName: "kube-api-access-4h7lz") pod "294a99ab-a769-4bd3-b43b-00b300e42b85" (UID: "294a99ab-a769-4bd3-b43b-00b300e42b85"). InnerVolumeSpecName "kube-api-access-4h7lz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.507968 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4h7lz\" (UniqueName: \"kubernetes.io/projected/294a99ab-a769-4bd3-b43b-00b300e42b85-kube-api-access-4h7lz\") on node \"crc\" DevicePath \"\""
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.965059 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg-config-bf47b" event={"ID":"294a99ab-a769-4bd3-b43b-00b300e42b85","Type":"ContainerDied","Data":"1ca94f56581e49f381a229203a2d700b466e97fdf49e7a2dceb1cd2402aba3f2"}
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.966076 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ca94f56581e49f381a229203a2d700b466e97fdf49e7a2dceb1cd2402aba3f2"
Nov 29 07:07:48 crc kubenswrapper[4943]: I1129 07:07:48.965184 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5gpxg-config-bf47b"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.373135 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-5gpxg-config-bf47b"]
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.381696 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-5gpxg-config-bf47b"]
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.470608 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5gpxg-config-wzcz4"]
Nov 29 07:07:49 crc kubenswrapper[4943]: E1129 07:07:49.471037 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="294a99ab-a769-4bd3-b43b-00b300e42b85" containerName="ovn-config"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.471056 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="294a99ab-a769-4bd3-b43b-00b300e42b85" containerName="ovn-config"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.471262 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="294a99ab-a769-4bd3-b43b-00b300e42b85" containerName="ovn-config"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.471993 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.475061 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.481168 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5gpxg-config-wzcz4"]
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.624731 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-scripts\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.624791 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2rqq\" (UniqueName: \"kubernetes.io/projected/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-kube-api-access-n2rqq\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.624842 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run-ovn\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.624881 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-additional-scripts\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.624920 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-log-ovn\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.624954 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.726848 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-scripts\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.726923 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2rqq\" (UniqueName: \"kubernetes.io/projected/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-kube-api-access-n2rqq\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.726956 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run-ovn\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.727013 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-additional-scripts\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.727097 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-log-ovn\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.727341 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run-ovn\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.727542 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-log-ovn\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4"
Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.727880 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume
\"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-additional-scripts\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4" Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.727948 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4" Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.728060 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4" Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.729149 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-scripts\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4" Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.744416 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2rqq\" (UniqueName: \"kubernetes.io/projected/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-kube-api-access-n2rqq\") pod \"ovn-controller-5gpxg-config-wzcz4\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " pod="openstack/ovn-controller-5gpxg-config-wzcz4" Nov 29 07:07:49 crc kubenswrapper[4943]: I1129 07:07:49.787171 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5gpxg-config-wzcz4" Nov 29 07:07:50 crc kubenswrapper[4943]: I1129 07:07:50.196025 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5gpxg-config-wzcz4"] Nov 29 07:07:50 crc kubenswrapper[4943]: I1129 07:07:50.979173 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg-config-wzcz4" event={"ID":"7d0a33b5-291d-44c3-9586-d487ca1fdb6b","Type":"ContainerStarted","Data":"78a2a5bebe0275e42aa91705a2ca7ef230a0bee8e181e2ff37254ba1021004b1"} Nov 29 07:07:51 crc kubenswrapper[4943]: I1129 07:07:51.336965 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="294a99ab-a769-4bd3-b43b-00b300e42b85" path="/var/lib/kubelet/pods/294a99ab-a769-4bd3-b43b-00b300e42b85/volumes" Nov 29 07:07:52 crc kubenswrapper[4943]: I1129 07:07:52.997267 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg-config-wzcz4" event={"ID":"7d0a33b5-291d-44c3-9586-d487ca1fdb6b","Type":"ContainerStarted","Data":"7c1e17d6e133478b720e92b2f1690ded1a4fa0f19ec406b9dfb3f9838e14345b"} Nov 29 07:07:54 crc kubenswrapper[4943]: I1129 07:07:54.006993 4943 generic.go:334] "Generic (PLEG): container finished" podID="7d0a33b5-291d-44c3-9586-d487ca1fdb6b" containerID="7c1e17d6e133478b720e92b2f1690ded1a4fa0f19ec406b9dfb3f9838e14345b" exitCode=0 Nov 29 07:07:54 crc kubenswrapper[4943]: I1129 07:07:54.007066 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg-config-wzcz4" event={"ID":"7d0a33b5-291d-44c3-9586-d487ca1fdb6b","Type":"ContainerDied","Data":"7c1e17d6e133478b720e92b2f1690ded1a4fa0f19ec406b9dfb3f9838e14345b"} Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.588238 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5gpxg-config-wzcz4" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.635009 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-scripts\") pod \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.635137 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run\") pod \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.635196 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run-ovn\") pod \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.635274 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-log-ovn\") pod \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.635326 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-additional-scripts\") pod \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.635348 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2rqq\" (UniqueName: \"kubernetes.io/projected/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-kube-api-access-n2rqq\") pod \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\" (UID: \"7d0a33b5-291d-44c3-9586-d487ca1fdb6b\") " Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.636676 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run" (OuterVolumeSpecName: "var-run") pod "7d0a33b5-291d-44c3-9586-d487ca1fdb6b" (UID: "7d0a33b5-291d-44c3-9586-d487ca1fdb6b"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.636715 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "7d0a33b5-291d-44c3-9586-d487ca1fdb6b" (UID: "7d0a33b5-291d-44c3-9586-d487ca1fdb6b"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.636707 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "7d0a33b5-291d-44c3-9586-d487ca1fdb6b" (UID: "7d0a33b5-291d-44c3-9586-d487ca1fdb6b"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.637082 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "7d0a33b5-291d-44c3-9586-d487ca1fdb6b" (UID: "7d0a33b5-291d-44c3-9586-d487ca1fdb6b"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.637162 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-scripts" (OuterVolumeSpecName: "scripts") pod "7d0a33b5-291d-44c3-9586-d487ca1fdb6b" (UID: "7d0a33b5-291d-44c3-9586-d487ca1fdb6b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.641971 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-kube-api-access-n2rqq" (OuterVolumeSpecName: "kube-api-access-n2rqq") pod "7d0a33b5-291d-44c3-9586-d487ca1fdb6b" (UID: "7d0a33b5-291d-44c3-9586-d487ca1fdb6b"). InnerVolumeSpecName "kube-api-access-n2rqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.737054 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.737089 4943 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run\") on node \"crc\" DevicePath \"\"" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.737101 4943 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.737113 4943 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.737128 4943 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:07:56 crc kubenswrapper[4943]: I1129 07:07:56.737141 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2rqq\" (UniqueName: \"kubernetes.io/projected/7d0a33b5-291d-44c3-9586-d487ca1fdb6b-kube-api-access-n2rqq\") on node \"crc\" DevicePath \"\"" Nov 29 07:07:57 crc kubenswrapper[4943]: I1129 07:07:57.032288 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2c300f9b-996e-4271-992d-932fbbb5e64f","Type":"ContainerStarted","Data":"552b79ec551a2d904918018c8220d1c51ec06f1594c6b90ca432133982672219"} Nov 29 07:07:57 crc kubenswrapper[4943]: I1129 07:07:57.034070 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5gpxg-config-wzcz4" 
event={"ID":"7d0a33b5-291d-44c3-9586-d487ca1fdb6b","Type":"ContainerDied","Data":"78a2a5bebe0275e42aa91705a2ca7ef230a0bee8e181e2ff37254ba1021004b1"} Nov 29 07:07:57 crc kubenswrapper[4943]: I1129 07:07:57.034097 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78a2a5bebe0275e42aa91705a2ca7ef230a0bee8e181e2ff37254ba1021004b1" Nov 29 07:07:57 crc kubenswrapper[4943]: I1129 07:07:57.034131 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5gpxg-config-wzcz4" Nov 29 07:07:57 crc kubenswrapper[4943]: I1129 07:07:57.664451 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-5gpxg-config-wzcz4"] Nov 29 07:07:57 crc kubenswrapper[4943]: I1129 07:07:57.672679 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-5gpxg-config-wzcz4"] Nov 29 07:07:58 crc kubenswrapper[4943]: I1129 07:07:58.041245 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 29 07:07:58 crc kubenswrapper[4943]: I1129 07:07:58.061153 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.25889719 podStartE2EDuration="37.061137102s" podCreationTimestamp="2025-11-29 07:07:21 +0000 UTC" firstStartedPulling="2025-11-29 07:07:22.834268967 +0000 UTC m=+2017.764357720" lastFinishedPulling="2025-11-29 07:07:56.636508879 +0000 UTC m=+2051.566597632" observedRunningTime="2025-11-29 07:07:58.058578209 +0000 UTC m=+2052.988666972" watchObservedRunningTime="2025-11-29 07:07:58.061137102 +0000 UTC m=+2052.991225855" Nov 29 07:07:59 crc kubenswrapper[4943]: I1129 07:07:59.006517 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 29 07:07:59 crc kubenswrapper[4943]: I1129 07:07:59.092756 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="a756fe5f-1037-4ec3-b91a-fdce5d723f04" containerName="galera" probeResult="failure" output=< Nov 29 07:07:59 crc kubenswrapper[4943]: wsrep_local_state_comment (Joined) differs from Synced Nov 29 07:07:59 crc kubenswrapper[4943]: > Nov 29 07:07:59 crc kubenswrapper[4943]: I1129 07:07:59.336636 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d0a33b5-291d-44c3-9586-d487ca1fdb6b" path="/var/lib/kubelet/pods/7d0a33b5-291d-44c3-9586-d487ca1fdb6b/volumes" Nov 29 07:08:02 crc kubenswrapper[4943]: I1129 07:08:02.677107 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 29 07:08:02 crc kubenswrapper[4943]: I1129 07:08:02.761161 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 29 07:08:04 crc kubenswrapper[4943]: I1129 07:08:04.982871 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-d123-account-create-update-zdc47"] Nov 29 07:08:04 crc kubenswrapper[4943]: E1129 07:08:04.983453 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d0a33b5-291d-44c3-9586-d487ca1fdb6b" containerName="ovn-config" Nov 29 07:08:04 crc kubenswrapper[4943]: I1129 07:08:04.983464 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d0a33b5-291d-44c3-9586-d487ca1fdb6b" containerName="ovn-config" Nov 29 07:08:04 crc kubenswrapper[4943]: I1129 07:08:04.983630 4943 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7d0a33b5-291d-44c3-9586-d487ca1fdb6b" containerName="ovn-config" Nov 29 07:08:04 crc kubenswrapper[4943]: I1129 07:08:04.984121 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:04 crc kubenswrapper[4943]: I1129 07:08:04.986443 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 29 07:08:04 crc kubenswrapper[4943]: I1129 07:08:04.995387 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d123-account-create-update-zdc47"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.038527 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-hr2kc"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.039525 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.040531 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b1a5eac-fd81-4595-a46b-dc0d79e09def-operator-scripts\") pod \"cinder-d123-account-create-update-zdc47\" (UID: \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\") " pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.040612 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-operator-scripts\") pod \"cinder-db-create-hr2kc\" (UID: \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\") " pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.040652 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xcqx\" (UniqueName: \"kubernetes.io/projected/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-kube-api-access-4xcqx\") pod \"cinder-db-create-hr2kc\" (UID: \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\") " pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.040692 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9x6d\" (UniqueName: \"kubernetes.io/projected/1b1a5eac-fd81-4595-a46b-dc0d79e09def-kube-api-access-c9x6d\") pod \"cinder-d123-account-create-update-zdc47\" (UID: \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\") " pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.056803 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-hr2kc"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.141717 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-operator-scripts\") pod \"cinder-db-create-hr2kc\" (UID: \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\") " pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.141804 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xcqx\" (UniqueName: \"kubernetes.io/projected/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-kube-api-access-4xcqx\") pod \"cinder-db-create-hr2kc\" (UID: \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\") " pod="openstack/cinder-db-create-hr2kc" 
Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.141868 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9x6d\" (UniqueName: \"kubernetes.io/projected/1b1a5eac-fd81-4595-a46b-dc0d79e09def-kube-api-access-c9x6d\") pod \"cinder-d123-account-create-update-zdc47\" (UID: \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\") " pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.142020 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b1a5eac-fd81-4595-a46b-dc0d79e09def-operator-scripts\") pod \"cinder-d123-account-create-update-zdc47\" (UID: \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\") " pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.142751 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-operator-scripts\") pod \"cinder-db-create-hr2kc\" (UID: \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\") " pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.142945 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b1a5eac-fd81-4595-a46b-dc0d79e09def-operator-scripts\") pod \"cinder-d123-account-create-update-zdc47\" (UID: \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\") " pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.143483 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-qfd9g"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.144642 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.155981 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-2086-account-create-update-6gl95"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.157141 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.159717 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.167511 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-qfd9g"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.168589 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9x6d\" (UniqueName: \"kubernetes.io/projected/1b1a5eac-fd81-4595-a46b-dc0d79e09def-kube-api-access-c9x6d\") pod \"cinder-d123-account-create-update-zdc47\" (UID: \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\") " pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.169045 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xcqx\" (UniqueName: \"kubernetes.io/projected/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-kube-api-access-4xcqx\") pod \"cinder-db-create-hr2kc\" (UID: \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\") " pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.174431 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2086-account-create-update-6gl95"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.242889 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-zxcn5"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.243452 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nctc4\" (UniqueName: \"kubernetes.io/projected/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-kube-api-access-nctc4\") pod \"barbican-db-create-qfd9g\" (UID: \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\") " pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.243511 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-operator-scripts\") pod \"barbican-db-create-qfd9g\" (UID: \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\") " pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.243540 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crvbl\" (UniqueName: \"kubernetes.io/projected/8ea41621-2d8a-4f99-ad06-da34cadcca6a-kube-api-access-crvbl\") pod \"barbican-2086-account-create-update-6gl95\" (UID: \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\") " pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.243634 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea41621-2d8a-4f99-ad06-da34cadcca6a-operator-scripts\") pod \"barbican-2086-account-create-update-6gl95\" (UID: \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\") " pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.244185 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.250743 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-3fcd-account-create-update-fp4gv"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.251984 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.253538 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.258128 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-zxcn5"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.272222 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3fcd-account-create-update-fp4gv"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.305015 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.345987 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea41621-2d8a-4f99-ad06-da34cadcca6a-operator-scripts\") pod \"barbican-2086-account-create-update-6gl95\" (UID: \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\") " pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.347327 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmbft\" (UniqueName: \"kubernetes.io/projected/a178d2d1-8c2f-4da5-bad1-15edb77a9508-kube-api-access-kmbft\") pod \"neutron-3fcd-account-create-update-fp4gv\" (UID: \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\") " pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.347478 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a178d2d1-8c2f-4da5-bad1-15edb77a9508-operator-scripts\") pod \"neutron-3fcd-account-create-update-fp4gv\" (UID: \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\") " pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.347772 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nctc4\" (UniqueName: \"kubernetes.io/projected/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-kube-api-access-nctc4\") pod \"barbican-db-create-qfd9g\" (UID: \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\") " pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.347810 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-operator-scripts\") pod \"barbican-db-create-qfd9g\" (UID: \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\") " pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.347874 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crvbl\" (UniqueName: \"kubernetes.io/projected/8ea41621-2d8a-4f99-ad06-da34cadcca6a-kube-api-access-crvbl\") pod \"barbican-2086-account-create-update-6gl95\" (UID: 
\"8ea41621-2d8a-4f99-ad06-da34cadcca6a\") " pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.347368 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea41621-2d8a-4f99-ad06-da34cadcca6a-operator-scripts\") pod \"barbican-2086-account-create-update-6gl95\" (UID: \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\") " pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.349748 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-operator-scripts\") pod \"barbican-db-create-qfd9g\" (UID: \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\") " pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.358685 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.374277 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nctc4\" (UniqueName: \"kubernetes.io/projected/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-kube-api-access-nctc4\") pod \"barbican-db-create-qfd9g\" (UID: \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\") " pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.385277 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crvbl\" (UniqueName: \"kubernetes.io/projected/8ea41621-2d8a-4f99-ad06-da34cadcca6a-kube-api-access-crvbl\") pod \"barbican-2086-account-create-update-6gl95\" (UID: \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\") " pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.449067 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmbft\" (UniqueName: \"kubernetes.io/projected/a178d2d1-8c2f-4da5-bad1-15edb77a9508-kube-api-access-kmbft\") pod \"neutron-3fcd-account-create-update-fp4gv\" (UID: \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\") " pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.449520 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a178d2d1-8c2f-4da5-bad1-15edb77a9508-operator-scripts\") pod \"neutron-3fcd-account-create-update-fp4gv\" (UID: \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\") " pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.449628 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6lwj\" (UniqueName: \"kubernetes.io/projected/43b36ea0-76e2-4c88-acb5-1009ac0c4383-kube-api-access-q6lwj\") pod \"neutron-db-create-zxcn5\" (UID: \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\") " pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.449692 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b36ea0-76e2-4c88-acb5-1009ac0c4383-operator-scripts\") pod \"neutron-db-create-zxcn5\" (UID: \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\") " pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:05 crc 
kubenswrapper[4943]: I1129 07:08:05.450777 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a178d2d1-8c2f-4da5-bad1-15edb77a9508-operator-scripts\") pod \"neutron-3fcd-account-create-update-fp4gv\" (UID: \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\") " pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.471128 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.478593 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmbft\" (UniqueName: \"kubernetes.io/projected/a178d2d1-8c2f-4da5-bad1-15edb77a9508-kube-api-access-kmbft\") pod \"neutron-3fcd-account-create-update-fp4gv\" (UID: \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\") " pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.511277 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.552105 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6lwj\" (UniqueName: \"kubernetes.io/projected/43b36ea0-76e2-4c88-acb5-1009ac0c4383-kube-api-access-q6lwj\") pod \"neutron-db-create-zxcn5\" (UID: \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\") " pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.552167 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b36ea0-76e2-4c88-acb5-1009ac0c4383-operator-scripts\") pod \"neutron-db-create-zxcn5\" (UID: \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\") " pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.552924 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b36ea0-76e2-4c88-acb5-1009ac0c4383-operator-scripts\") pod \"neutron-db-create-zxcn5\" (UID: \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\") " pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.582875 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6lwj\" (UniqueName: \"kubernetes.io/projected/43b36ea0-76e2-4c88-acb5-1009ac0c4383-kube-api-access-q6lwj\") pod \"neutron-db-create-zxcn5\" (UID: \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\") " pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.584791 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.638787 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-42ffw"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.640878 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.655338 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2698992-ed9a-4c83-9294-71ba289f83f9-operator-scripts\") pod \"keystone-db-create-42ffw\" (UID: \"b2698992-ed9a-4c83-9294-71ba289f83f9\") " pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.655409 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpcb4\" (UniqueName: \"kubernetes.io/projected/b2698992-ed9a-4c83-9294-71ba289f83f9-kube-api-access-xpcb4\") pod \"keystone-db-create-42ffw\" (UID: \"b2698992-ed9a-4c83-9294-71ba289f83f9\") " pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.658725 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-42ffw"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.742497 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-9232-account-create-update-lrvzq"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.743575 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.746087 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.750208 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-9232-account-create-update-lrvzq"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.756007 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-operator-scripts\") pod \"keystone-9232-account-create-update-lrvzq\" (UID: \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\") " pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.756058 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2698992-ed9a-4c83-9294-71ba289f83f9-operator-scripts\") pod \"keystone-db-create-42ffw\" (UID: \"b2698992-ed9a-4c83-9294-71ba289f83f9\") " pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.756097 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpcb4\" (UniqueName: \"kubernetes.io/projected/b2698992-ed9a-4c83-9294-71ba289f83f9-kube-api-access-xpcb4\") pod \"keystone-db-create-42ffw\" (UID: \"b2698992-ed9a-4c83-9294-71ba289f83f9\") " pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.756116 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk8rn\" (UniqueName: \"kubernetes.io/projected/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-kube-api-access-wk8rn\") pod \"keystone-9232-account-create-update-lrvzq\" (UID: \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\") " pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.756853 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2698992-ed9a-4c83-9294-71ba289f83f9-operator-scripts\") pod \"keystone-db-create-42ffw\" (UID: \"b2698992-ed9a-4c83-9294-71ba289f83f9\") " pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.783597 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpcb4\" (UniqueName: \"kubernetes.io/projected/b2698992-ed9a-4c83-9294-71ba289f83f9-kube-api-access-xpcb4\") pod \"keystone-db-create-42ffw\" (UID: \"b2698992-ed9a-4c83-9294-71ba289f83f9\") " pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.856523 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-operator-scripts\") pod \"keystone-9232-account-create-update-lrvzq\" (UID: \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\") " pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.856618 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk8rn\" (UniqueName: \"kubernetes.io/projected/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-kube-api-access-wk8rn\") pod \"keystone-9232-account-create-update-lrvzq\" (UID: \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\") " pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.857852 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-operator-scripts\") pod \"keystone-9232-account-create-update-lrvzq\" (UID: \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\") " pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.867573 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.889390 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk8rn\" (UniqueName: \"kubernetes.io/projected/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-kube-api-access-wk8rn\") pod \"keystone-9232-account-create-update-lrvzq\" (UID: \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\") " pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.892962 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-b55h5"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.905620 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-b55h5" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.942648 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-b55h5"] Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.946438 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 29 07:08:05 crc kubenswrapper[4943]: I1129 07:08:05.978190 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:05 crc kubenswrapper[4943]: W1129 07:08:05.988725 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4585ad40_a69e_4a85_9f0f_e051f80ec2a2.slice/crio-0f192ad8100bcf43a542a6caf31e1717288f9ee9dda9233a209ab9fc5f403554 WatchSource:0}: Error finding container 0f192ad8100bcf43a542a6caf31e1717288f9ee9dda9233a209ab9fc5f403554: Status 404 returned error can't find the container with id 0f192ad8100bcf43a542a6caf31e1717288f9ee9dda9233a209ab9fc5f403554 Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.001949 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-hr2kc"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.043308 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d123-account-create-update-zdc47"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.058639 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-39a1-account-create-update-bjzkr"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.059862 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvrlp\" (UniqueName: \"kubernetes.io/projected/03ca0239-e8f6-4461-a843-27423c41c4b3-kube-api-access-xvrlp\") pod \"placement-db-create-b55h5\" (UID: \"03ca0239-e8f6-4461-a843-27423c41c4b3\") " pod="openstack/placement-db-create-b55h5" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.059956 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ca0239-e8f6-4461-a843-27423c41c4b3-operator-scripts\") pod \"placement-db-create-b55h5\" (UID: \"03ca0239-e8f6-4461-a843-27423c41c4b3\") " pod="openstack/placement-db-create-b55h5" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.060138 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.064776 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.065992 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.072902 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3fcd-account-create-update-fp4gv"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.098676 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-39a1-account-create-update-bjzkr"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.106916 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hr2kc" event={"ID":"4585ad40-a69e-4a85-9f0f-e051f80ec2a2","Type":"ContainerStarted","Data":"0f192ad8100bcf43a542a6caf31e1717288f9ee9dda9233a209ab9fc5f403554"} Nov 29 07:08:06 crc kubenswrapper[4943]: W1129 07:08:06.108546 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b1a5eac_fd81_4595_a46b_dc0d79e09def.slice/crio-0ed0854879bd1b6ed5d168d0d8d901dbf6c90d8188ea9839e3acea55fdfa94e0 WatchSource:0}: Error finding container 0ed0854879bd1b6ed5d168d0d8d901dbf6c90d8188ea9839e3acea55fdfa94e0: Status 404 returned error can't find the container with id 0ed0854879bd1b6ed5d168d0d8d901dbf6c90d8188ea9839e3acea55fdfa94e0 Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.119774 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2086-account-create-update-6gl95"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.126713 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-qfd9g"] Nov 29 07:08:06 crc kubenswrapper[4943]: W1129 07:08:06.145277 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9380c6cb_987b_4813_b0fd_0fc45c0ddaaa.slice/crio-1098939e6f5d1aba673351c2a8ab2c922173818e7f753336f102268ba6c59d2b WatchSource:0}: Error finding container 1098939e6f5d1aba673351c2a8ab2c922173818e7f753336f102268ba6c59d2b: Status 404 returned error can't find the container with id 1098939e6f5d1aba673351c2a8ab2c922173818e7f753336f102268ba6c59d2b Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.161743 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-operator-scripts\") pod \"placement-39a1-account-create-update-bjzkr\" (UID: \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\") " pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.161806 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvrlp\" (UniqueName: \"kubernetes.io/projected/03ca0239-e8f6-4461-a843-27423c41c4b3-kube-api-access-xvrlp\") pod \"placement-db-create-b55h5\" (UID: \"03ca0239-e8f6-4461-a843-27423c41c4b3\") " pod="openstack/placement-db-create-b55h5" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.161868 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nwqn\" (UniqueName: \"kubernetes.io/projected/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-kube-api-access-7nwqn\") pod \"placement-39a1-account-create-update-bjzkr\" (UID: \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\") " pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.161890 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ca0239-e8f6-4461-a843-27423c41c4b3-operator-scripts\") pod \"placement-db-create-b55h5\" (UID: \"03ca0239-e8f6-4461-a843-27423c41c4b3\") " pod="openstack/placement-db-create-b55h5" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.162663 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ca0239-e8f6-4461-a843-27423c41c4b3-operator-scripts\") pod \"placement-db-create-b55h5\" (UID: \"03ca0239-e8f6-4461-a843-27423c41c4b3\") " pod="openstack/placement-db-create-b55h5" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.191968 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvrlp\" (UniqueName: \"kubernetes.io/projected/03ca0239-e8f6-4461-a843-27423c41c4b3-kube-api-access-xvrlp\") pod \"placement-db-create-b55h5\" (UID: \"03ca0239-e8f6-4461-a843-27423c41c4b3\") " pod="openstack/placement-db-create-b55h5" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.264555 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nwqn\" (UniqueName: \"kubernetes.io/projected/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-kube-api-access-7nwqn\") pod \"placement-39a1-account-create-update-bjzkr\" (UID: \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\") " pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.266030 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-operator-scripts\") pod \"placement-39a1-account-create-update-bjzkr\" (UID: \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\") " pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.267813 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-operator-scripts\") pod \"placement-39a1-account-create-update-bjzkr\" (UID: \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\") " pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.268427 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-b55h5" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.298446 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nwqn\" (UniqueName: \"kubernetes.io/projected/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-kube-api-access-7nwqn\") pod \"placement-39a1-account-create-update-bjzkr\" (UID: \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\") " pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.402205 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-42ffw"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.402583 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.431451 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-zxcn5"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.516773 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-9232-account-create-update-lrvzq"] Nov 29 07:08:06 crc kubenswrapper[4943]: I1129 07:08:06.940610 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-b55h5"] Nov 29 07:08:06 crc kubenswrapper[4943]: W1129 07:08:06.943465 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03ca0239_e8f6_4461_a843_27423c41c4b3.slice/crio-9ba1c7e99c1a307aef9dae44e17f46e5c6d69a08ae7c8c8a6ced51ef27eba695 WatchSource:0}: Error finding container 9ba1c7e99c1a307aef9dae44e17f46e5c6d69a08ae7c8c8a6ced51ef27eba695: Status 404 returned error can't find the container with id 9ba1c7e99c1a307aef9dae44e17f46e5c6d69a08ae7c8c8a6ced51ef27eba695 Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.094219 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-39a1-account-create-update-bjzkr"] Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.121538 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-39a1-account-create-update-bjzkr" event={"ID":"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2","Type":"ContainerStarted","Data":"f5ba474fd33fa20485c27fa1b7c473769b4bff4d1372ad19901df2e87cc8539a"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.123433 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d123-account-create-update-zdc47" event={"ID":"1b1a5eac-fd81-4595-a46b-dc0d79e09def","Type":"ContainerStarted","Data":"812bddea28046febf505711572111b12eaeffd67e2db3f86911b6b9e716b58d8"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.123481 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d123-account-create-update-zdc47" event={"ID":"1b1a5eac-fd81-4595-a46b-dc0d79e09def","Type":"ContainerStarted","Data":"0ed0854879bd1b6ed5d168d0d8d901dbf6c90d8188ea9839e3acea55fdfa94e0"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.125805 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2086-account-create-update-6gl95" event={"ID":"8ea41621-2d8a-4f99-ad06-da34cadcca6a","Type":"ContainerStarted","Data":"3c24bdf867712015a1678899e4b340040c5d61d8afbc28b43f1989a0826ce83d"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.125842 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2086-account-create-update-6gl95" event={"ID":"8ea41621-2d8a-4f99-ad06-da34cadcca6a","Type":"ContainerStarted","Data":"dd6421ee08cb7e254ef2ed395815e43061494e2fc7ca43dbfcad2df956e70683"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.128487 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-zxcn5" event={"ID":"43b36ea0-76e2-4c88-acb5-1009ac0c4383","Type":"ContainerStarted","Data":"40069339da6faee502fef270fc26c5c39e4502fb012e1eb3e8ee912328070e78"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.128527 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-zxcn5" 
event={"ID":"43b36ea0-76e2-4c88-acb5-1009ac0c4383","Type":"ContainerStarted","Data":"83151d7d5fb7443840332674da7ce5af7404ddec9e53f64226f8ce59d1db7762"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.129777 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3fcd-account-create-update-fp4gv" event={"ID":"a178d2d1-8c2f-4da5-bad1-15edb77a9508","Type":"ContainerStarted","Data":"1bd3ea063b789f68267abc2074a9c569cff4e391ba9eaf887e29a7c7d18a8f57"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.129812 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3fcd-account-create-update-fp4gv" event={"ID":"a178d2d1-8c2f-4da5-bad1-15edb77a9508","Type":"ContainerStarted","Data":"feb054c1ef878b2b961915b477c0cd7e2eeeb194f68f18fd18f8bcc49b9891b0"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.132166 4943 generic.go:334] "Generic (PLEG): container finished" podID="4585ad40-a69e-4a85-9f0f-e051f80ec2a2" containerID="f0ad72ec7d3168ba523f0752c312e41a6e43ed4caa235fb078189bbbbf5d8766" exitCode=0 Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.132219 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hr2kc" event={"ID":"4585ad40-a69e-4a85-9f0f-e051f80ec2a2","Type":"ContainerDied","Data":"f0ad72ec7d3168ba523f0752c312e41a6e43ed4caa235fb078189bbbbf5d8766"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.133739 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qfd9g" event={"ID":"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa","Type":"ContainerStarted","Data":"49eb44bbf07b71e42a3a8bf15c19af51acb0736fa53457792a2dec56644cbb16"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.133768 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qfd9g" event={"ID":"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa","Type":"ContainerStarted","Data":"1098939e6f5d1aba673351c2a8ab2c922173818e7f753336f102268ba6c59d2b"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.142360 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-d123-account-create-update-zdc47" podStartSLOduration=3.142338427 podStartE2EDuration="3.142338427s" podCreationTimestamp="2025-11-29 07:08:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:07.137330932 +0000 UTC m=+2062.067419695" watchObservedRunningTime="2025-11-29 07:08:07.142338427 +0000 UTC m=+2062.072427190" Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.143399 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-42ffw" event={"ID":"b2698992-ed9a-4c83-9294-71ba289f83f9","Type":"ContainerStarted","Data":"bd3e80ccfa3c12715c355eb49afd4ecd5a2f1ce180109cbbb104bb02b4cd5f0c"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.143448 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-42ffw" event={"ID":"b2698992-ed9a-4c83-9294-71ba289f83f9","Type":"ContainerStarted","Data":"0487f1d155386e94a6a79894779c2e52c74f46dbb40368ceb142cbeb7ef4bf88"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.144920 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-b55h5" event={"ID":"03ca0239-e8f6-4461-a843-27423c41c4b3","Type":"ContainerStarted","Data":"9ba1c7e99c1a307aef9dae44e17f46e5c6d69a08ae7c8c8a6ced51ef27eba695"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 
07:08:07.145843 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9232-account-create-update-lrvzq" event={"ID":"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7","Type":"ContainerStarted","Data":"21de499bda3ddfdfb2aefda5b5d44695a04b4a7749c91537d7f7de3cb008a925"} Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.171772 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-2086-account-create-update-6gl95" podStartSLOduration=2.1717457749999998 podStartE2EDuration="2.171745775s" podCreationTimestamp="2025-11-29 07:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:07.16994215 +0000 UTC m=+2062.100030913" watchObservedRunningTime="2025-11-29 07:08:07.171745775 +0000 UTC m=+2062.101834528" Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.204337 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-3fcd-account-create-update-fp4gv" podStartSLOduration=2.204316491 podStartE2EDuration="2.204316491s" podCreationTimestamp="2025-11-29 07:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:07.199190915 +0000 UTC m=+2062.129279688" watchObservedRunningTime="2025-11-29 07:08:07.204316491 +0000 UTC m=+2062.134405244" Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.217030 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-qfd9g" podStartSLOduration=2.217008055 podStartE2EDuration="2.217008055s" podCreationTimestamp="2025-11-29 07:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:07.211719365 +0000 UTC m=+2062.141808128" watchObservedRunningTime="2025-11-29 07:08:07.217008055 +0000 UTC m=+2062.147096808" Nov 29 07:08:07 crc kubenswrapper[4943]: I1129 07:08:07.318597 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 29 07:08:08 crc kubenswrapper[4943]: I1129 07:08:08.172290 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-zxcn5" podStartSLOduration=3.172269789 podStartE2EDuration="3.172269789s" podCreationTimestamp="2025-11-29 07:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:08.170060504 +0000 UTC m=+2063.100149257" watchObservedRunningTime="2025-11-29 07:08:08.172269789 +0000 UTC m=+2063.102358542" Nov 29 07:08:08 crc kubenswrapper[4943]: I1129 07:08:08.516208 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:08 crc kubenswrapper[4943]: I1129 07:08:08.612683 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-operator-scripts\") pod \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\" (UID: \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\") " Nov 29 07:08:08 crc kubenswrapper[4943]: I1129 07:08:08.612800 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xcqx\" (UniqueName: \"kubernetes.io/projected/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-kube-api-access-4xcqx\") pod \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\" (UID: \"4585ad40-a69e-4a85-9f0f-e051f80ec2a2\") " Nov 29 07:08:08 crc kubenswrapper[4943]: I1129 07:08:08.613286 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4585ad40-a69e-4a85-9f0f-e051f80ec2a2" (UID: "4585ad40-a69e-4a85-9f0f-e051f80ec2a2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:08 crc kubenswrapper[4943]: I1129 07:08:08.613660 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:08 crc kubenswrapper[4943]: I1129 07:08:08.619656 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-kube-api-access-4xcqx" (OuterVolumeSpecName: "kube-api-access-4xcqx") pod "4585ad40-a69e-4a85-9f0f-e051f80ec2a2" (UID: "4585ad40-a69e-4a85-9f0f-e051f80ec2a2"). InnerVolumeSpecName "kube-api-access-4xcqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:08 crc kubenswrapper[4943]: I1129 07:08:08.715120 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xcqx\" (UniqueName: \"kubernetes.io/projected/4585ad40-a69e-4a85-9f0f-e051f80ec2a2-kube-api-access-4xcqx\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:09 crc kubenswrapper[4943]: I1129 07:08:09.163896 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hr2kc" event={"ID":"4585ad40-a69e-4a85-9f0f-e051f80ec2a2","Type":"ContainerDied","Data":"0f192ad8100bcf43a542a6caf31e1717288f9ee9dda9233a209ab9fc5f403554"} Nov 29 07:08:09 crc kubenswrapper[4943]: I1129 07:08:09.163945 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f192ad8100bcf43a542a6caf31e1717288f9ee9dda9233a209ab9fc5f403554" Nov 29 07:08:09 crc kubenswrapper[4943]: I1129 07:08:09.164020 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-hr2kc" Nov 29 07:08:09 crc kubenswrapper[4943]: I1129 07:08:09.165315 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-b55h5" event={"ID":"03ca0239-e8f6-4461-a843-27423c41c4b3","Type":"ContainerStarted","Data":"e99a74850d328e67e8b77319697ca772ba91e0b146a63cd4bad836ddff781b39"} Nov 29 07:08:09 crc kubenswrapper[4943]: I1129 07:08:09.166806 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9232-account-create-update-lrvzq" event={"ID":"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7","Type":"ContainerStarted","Data":"4d311826fc9b2c68f96b713c3041595e7cd1f52688c1a5e7767499bef047c281"} Nov 29 07:08:09 crc kubenswrapper[4943]: I1129 07:08:09.187112 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-42ffw" podStartSLOduration=4.187079875 podStartE2EDuration="4.187079875s" podCreationTimestamp="2025-11-29 07:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:09.183898447 +0000 UTC m=+2064.113987240" watchObservedRunningTime="2025-11-29 07:08:09.187079875 +0000 UTC m=+2064.117168628" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.116025 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-qhcwg"] Nov 29 07:08:11 crc kubenswrapper[4943]: E1129 07:08:11.117674 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4585ad40-a69e-4a85-9f0f-e051f80ec2a2" containerName="mariadb-database-create" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.117789 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4585ad40-a69e-4a85-9f0f-e051f80ec2a2" containerName="mariadb-database-create" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.118330 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="4585ad40-a69e-4a85-9f0f-e051f80ec2a2" containerName="mariadb-database-create" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.119204 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.127515 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-qhcwg"] Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.192484 4943 generic.go:334] "Generic (PLEG): container finished" podID="8ea41621-2d8a-4f99-ad06-da34cadcca6a" containerID="3c24bdf867712015a1678899e4b340040c5d61d8afbc28b43f1989a0826ce83d" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.192903 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2086-account-create-update-6gl95" event={"ID":"8ea41621-2d8a-4f99-ad06-da34cadcca6a","Type":"ContainerDied","Data":"3c24bdf867712015a1678899e4b340040c5d61d8afbc28b43f1989a0826ce83d"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.198203 4943 generic.go:334] "Generic (PLEG): container finished" podID="7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7" containerID="4d311826fc9b2c68f96b713c3041595e7cd1f52688c1a5e7767499bef047c281" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.198239 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9232-account-create-update-lrvzq" event={"ID":"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7","Type":"ContainerDied","Data":"4d311826fc9b2c68f96b713c3041595e7cd1f52688c1a5e7767499bef047c281"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.202090 4943 generic.go:334] "Generic (PLEG): container finished" podID="9380c6cb-987b-4813-b0fd-0fc45c0ddaaa" containerID="49eb44bbf07b71e42a3a8bf15c19af51acb0736fa53457792a2dec56644cbb16" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.202135 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qfd9g" event={"ID":"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa","Type":"ContainerDied","Data":"49eb44bbf07b71e42a3a8bf15c19af51acb0736fa53457792a2dec56644cbb16"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.205432 4943 generic.go:334] "Generic (PLEG): container finished" podID="1b1a5eac-fd81-4595-a46b-dc0d79e09def" containerID="812bddea28046febf505711572111b12eaeffd67e2db3f86911b6b9e716b58d8" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.205502 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d123-account-create-update-zdc47" event={"ID":"1b1a5eac-fd81-4595-a46b-dc0d79e09def","Type":"ContainerDied","Data":"812bddea28046febf505711572111b12eaeffd67e2db3f86911b6b9e716b58d8"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.213343 4943 generic.go:334] "Generic (PLEG): container finished" podID="b2698992-ed9a-4c83-9294-71ba289f83f9" containerID="bd3e80ccfa3c12715c355eb49afd4ecd5a2f1ce180109cbbb104bb02b4cd5f0c" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.213450 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-42ffw" event={"ID":"b2698992-ed9a-4c83-9294-71ba289f83f9","Type":"ContainerDied","Data":"bd3e80ccfa3c12715c355eb49afd4ecd5a2f1ce180109cbbb104bb02b4cd5f0c"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.216759 4943 generic.go:334] "Generic (PLEG): container finished" podID="a178d2d1-8c2f-4da5-bad1-15edb77a9508" containerID="1bd3ea063b789f68267abc2074a9c569cff4e391ba9eaf887e29a7c7d18a8f57" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.216839 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3fcd-account-create-update-fp4gv" 
event={"ID":"a178d2d1-8c2f-4da5-bad1-15edb77a9508","Type":"ContainerDied","Data":"1bd3ea063b789f68267abc2074a9c569cff4e391ba9eaf887e29a7c7d18a8f57"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.219133 4943 generic.go:334] "Generic (PLEG): container finished" podID="43b36ea0-76e2-4c88-acb5-1009ac0c4383" containerID="40069339da6faee502fef270fc26c5c39e4502fb012e1eb3e8ee912328070e78" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.219185 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-zxcn5" event={"ID":"43b36ea0-76e2-4c88-acb5-1009ac0c4383","Type":"ContainerDied","Data":"40069339da6faee502fef270fc26c5c39e4502fb012e1eb3e8ee912328070e78"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.224288 4943 generic.go:334] "Generic (PLEG): container finished" podID="03ca0239-e8f6-4461-a843-27423c41c4b3" containerID="e99a74850d328e67e8b77319697ca772ba91e0b146a63cd4bad836ddff781b39" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.224384 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-b55h5" event={"ID":"03ca0239-e8f6-4461-a843-27423c41c4b3","Type":"ContainerDied","Data":"e99a74850d328e67e8b77319697ca772ba91e0b146a63cd4bad836ddff781b39"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.227714 4943 generic.go:334] "Generic (PLEG): container finished" podID="def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2" containerID="475ae878fac798e5a8e5c17195778d15dd9f86cbf30a2fcda7356582ffac0118" exitCode=0 Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.227783 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-39a1-account-create-update-bjzkr" event={"ID":"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2","Type":"ContainerDied","Data":"475ae878fac798e5a8e5c17195778d15dd9f86cbf30a2fcda7356582ffac0118"} Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.251353 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-daf9-account-create-update-qq5jc"] Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.253303 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.255520 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.257670 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-operator-scripts\") pod \"glance-db-create-qhcwg\" (UID: \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\") " pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.257784 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtck4\" (UniqueName: \"kubernetes.io/projected/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-kube-api-access-rtck4\") pod \"glance-db-create-qhcwg\" (UID: \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\") " pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.261794 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-daf9-account-create-update-qq5jc"] Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.359233 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-operator-scripts\") pod \"glance-db-create-qhcwg\" (UID: \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\") " pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.359382 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtck4\" (UniqueName: \"kubernetes.io/projected/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-kube-api-access-rtck4\") pod \"glance-db-create-qhcwg\" (UID: \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\") " pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.359433 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-operator-scripts\") pod \"glance-daf9-account-create-update-qq5jc\" (UID: \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\") " pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.359464 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f65bp\" (UniqueName: \"kubernetes.io/projected/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-kube-api-access-f65bp\") pod \"glance-daf9-account-create-update-qq5jc\" (UID: \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\") " pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.360602 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-operator-scripts\") pod \"glance-db-create-qhcwg\" (UID: \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\") " pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.386707 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtck4\" (UniqueName: \"kubernetes.io/projected/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-kube-api-access-rtck4\") pod \"glance-db-create-qhcwg\" (UID: 
\"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\") " pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.460991 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-operator-scripts\") pod \"glance-daf9-account-create-update-qq5jc\" (UID: \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\") " pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.461066 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f65bp\" (UniqueName: \"kubernetes.io/projected/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-kube-api-access-f65bp\") pod \"glance-daf9-account-create-update-qq5jc\" (UID: \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\") " pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.462350 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-operator-scripts\") pod \"glance-daf9-account-create-update-qq5jc\" (UID: \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\") " pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.482614 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.483749 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f65bp\" (UniqueName: \"kubernetes.io/projected/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-kube-api-access-f65bp\") pod \"glance-daf9-account-create-update-qq5jc\" (UID: \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\") " pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.580553 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:11 crc kubenswrapper[4943]: I1129 07:08:11.759150 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-qhcwg"] Nov 29 07:08:12 crc kubenswrapper[4943]: W1129 07:08:12.071932 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee3e2ff9_9a6e_4d2e_a487_6574ab627de4.slice/crio-b15952524581bb2db717a615c0a3986baf76c1821546f44558ad436de2525b0b WatchSource:0}: Error finding container b15952524581bb2db717a615c0a3986baf76c1821546f44558ad436de2525b0b: Status 404 returned error can't find the container with id b15952524581bb2db717a615c0a3986baf76c1821546f44558ad436de2525b0b Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.074167 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-daf9-account-create-update-qq5jc"] Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.239367 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-daf9-account-create-update-qq5jc" event={"ID":"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4","Type":"ContainerStarted","Data":"b15952524581bb2db717a615c0a3986baf76c1821546f44558ad436de2525b0b"} Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.241257 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-qhcwg" event={"ID":"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb","Type":"ContainerStarted","Data":"79b5e696c6fb2d7285455c6dbd12d74d0ae5080ed1331ef0f6c29f1022664845"} Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.621723 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.786767 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk8rn\" (UniqueName: \"kubernetes.io/projected/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-kube-api-access-wk8rn\") pod \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\" (UID: \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\") " Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.787201 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-operator-scripts\") pod \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\" (UID: \"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7\") " Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.788046 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7" (UID: "7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.792844 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-kube-api-access-wk8rn" (OuterVolumeSpecName: "kube-api-access-wk8rn") pod "7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7" (UID: "7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7"). InnerVolumeSpecName "kube-api-access-wk8rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.877152 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.887415 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-b55h5" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.889440 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.889465 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk8rn\" (UniqueName: \"kubernetes.io/projected/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7-kube-api-access-wk8rn\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.900801 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.911042 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.923817 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.938424 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.944354 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.964680 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.995671 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crvbl\" (UniqueName: \"kubernetes.io/projected/8ea41621-2d8a-4f99-ad06-da34cadcca6a-kube-api-access-crvbl\") pod \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\" (UID: \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\") " Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.995723 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea41621-2d8a-4f99-ad06-da34cadcca6a-operator-scripts\") pod \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\" (UID: \"8ea41621-2d8a-4f99-ad06-da34cadcca6a\") " Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.995755 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nctc4\" (UniqueName: \"kubernetes.io/projected/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-kube-api-access-nctc4\") pod \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\" (UID: \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\") " Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.995816 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ca0239-e8f6-4461-a843-27423c41c4b3-operator-scripts\") pod \"03ca0239-e8f6-4461-a843-27423c41c4b3\" (UID: \"03ca0239-e8f6-4461-a843-27423c41c4b3\") " Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.995840 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-operator-scripts\") pod \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\" (UID: \"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa\") " Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.995926 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvrlp\" (UniqueName: \"kubernetes.io/projected/03ca0239-e8f6-4461-a843-27423c41c4b3-kube-api-access-xvrlp\") pod \"03ca0239-e8f6-4461-a843-27423c41c4b3\" (UID: \"03ca0239-e8f6-4461-a843-27423c41c4b3\") " Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.997062 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03ca0239-e8f6-4461-a843-27423c41c4b3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "03ca0239-e8f6-4461-a843-27423c41c4b3" (UID: "03ca0239-e8f6-4461-a843-27423c41c4b3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.997310 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ea41621-2d8a-4f99-ad06-da34cadcca6a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8ea41621-2d8a-4f99-ad06-da34cadcca6a" (UID: "8ea41621-2d8a-4f99-ad06-da34cadcca6a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.997768 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9380c6cb-987b-4813-b0fd-0fc45c0ddaaa" (UID: "9380c6cb-987b-4813-b0fd-0fc45c0ddaaa"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.999544 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03ca0239-e8f6-4461-a843-27423c41c4b3-kube-api-access-xvrlp" (OuterVolumeSpecName: "kube-api-access-xvrlp") pod "03ca0239-e8f6-4461-a843-27423c41c4b3" (UID: "03ca0239-e8f6-4461-a843-27423c41c4b3"). InnerVolumeSpecName "kube-api-access-xvrlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:12 crc kubenswrapper[4943]: I1129 07:08:12.999596 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-kube-api-access-nctc4" (OuterVolumeSpecName: "kube-api-access-nctc4") pod "9380c6cb-987b-4813-b0fd-0fc45c0ddaaa" (UID: "9380c6cb-987b-4813-b0fd-0fc45c0ddaaa"). InnerVolumeSpecName "kube-api-access-nctc4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:12.999993 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ea41621-2d8a-4f99-ad06-da34cadcca6a-kube-api-access-crvbl" (OuterVolumeSpecName: "kube-api-access-crvbl") pod "8ea41621-2d8a-4f99-ad06-da34cadcca6a" (UID: "8ea41621-2d8a-4f99-ad06-da34cadcca6a"). InnerVolumeSpecName "kube-api-access-crvbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.096781 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2698992-ed9a-4c83-9294-71ba289f83f9-operator-scripts\") pod \"b2698992-ed9a-4c83-9294-71ba289f83f9\" (UID: \"b2698992-ed9a-4c83-9294-71ba289f83f9\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.096819 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a178d2d1-8c2f-4da5-bad1-15edb77a9508-operator-scripts\") pod \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\" (UID: \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.096860 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nwqn\" (UniqueName: \"kubernetes.io/projected/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-kube-api-access-7nwqn\") pod \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\" (UID: \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.096963 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b36ea0-76e2-4c88-acb5-1009ac0c4383-operator-scripts\") pod \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\" (UID: \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.096996 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpcb4\" (UniqueName: \"kubernetes.io/projected/b2698992-ed9a-4c83-9294-71ba289f83f9-kube-api-access-xpcb4\") pod \"b2698992-ed9a-4c83-9294-71ba289f83f9\" (UID: \"b2698992-ed9a-4c83-9294-71ba289f83f9\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097083 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmbft\" (UniqueName: \"kubernetes.io/projected/a178d2d1-8c2f-4da5-bad1-15edb77a9508-kube-api-access-kmbft\") pod 
\"a178d2d1-8c2f-4da5-bad1-15edb77a9508\" (UID: \"a178d2d1-8c2f-4da5-bad1-15edb77a9508\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097132 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6lwj\" (UniqueName: \"kubernetes.io/projected/43b36ea0-76e2-4c88-acb5-1009ac0c4383-kube-api-access-q6lwj\") pod \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\" (UID: \"43b36ea0-76e2-4c88-acb5-1009ac0c4383\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097167 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-operator-scripts\") pod \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\" (UID: \"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097201 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9x6d\" (UniqueName: \"kubernetes.io/projected/1b1a5eac-fd81-4595-a46b-dc0d79e09def-kube-api-access-c9x6d\") pod \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\" (UID: \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097233 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a178d2d1-8c2f-4da5-bad1-15edb77a9508-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a178d2d1-8c2f-4da5-bad1-15edb77a9508" (UID: "a178d2d1-8c2f-4da5-bad1-15edb77a9508"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097255 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b1a5eac-fd81-4595-a46b-dc0d79e09def-operator-scripts\") pod \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\" (UID: \"1b1a5eac-fd81-4595-a46b-dc0d79e09def\") " Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097371 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43b36ea0-76e2-4c88-acb5-1009ac0c4383-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "43b36ea0-76e2-4c88-acb5-1009ac0c4383" (UID: "43b36ea0-76e2-4c88-acb5-1009ac0c4383"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097737 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crvbl\" (UniqueName: \"kubernetes.io/projected/8ea41621-2d8a-4f99-ad06-da34cadcca6a-kube-api-access-crvbl\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097763 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ea41621-2d8a-4f99-ad06-da34cadcca6a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097774 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nctc4\" (UniqueName: \"kubernetes.io/projected/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-kube-api-access-nctc4\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097790 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ca0239-e8f6-4461-a843-27423c41c4b3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097803 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097814 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a178d2d1-8c2f-4da5-bad1-15edb77a9508-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097825 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b36ea0-76e2-4c88-acb5-1009ac0c4383-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097836 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvrlp\" (UniqueName: \"kubernetes.io/projected/03ca0239-e8f6-4461-a843-27423c41c4b3-kube-api-access-xvrlp\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.097907 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2698992-ed9a-4c83-9294-71ba289f83f9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b2698992-ed9a-4c83-9294-71ba289f83f9" (UID: "b2698992-ed9a-4c83-9294-71ba289f83f9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.098157 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b1a5eac-fd81-4595-a46b-dc0d79e09def-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1b1a5eac-fd81-4595-a46b-dc0d79e09def" (UID: "1b1a5eac-fd81-4595-a46b-dc0d79e09def"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.098436 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2" (UID: "def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.100434 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43b36ea0-76e2-4c88-acb5-1009ac0c4383-kube-api-access-q6lwj" (OuterVolumeSpecName: "kube-api-access-q6lwj") pod "43b36ea0-76e2-4c88-acb5-1009ac0c4383" (UID: "43b36ea0-76e2-4c88-acb5-1009ac0c4383"). InnerVolumeSpecName "kube-api-access-q6lwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.101534 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2698992-ed9a-4c83-9294-71ba289f83f9-kube-api-access-xpcb4" (OuterVolumeSpecName: "kube-api-access-xpcb4") pod "b2698992-ed9a-4c83-9294-71ba289f83f9" (UID: "b2698992-ed9a-4c83-9294-71ba289f83f9"). InnerVolumeSpecName "kube-api-access-xpcb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.101710 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b1a5eac-fd81-4595-a46b-dc0d79e09def-kube-api-access-c9x6d" (OuterVolumeSpecName: "kube-api-access-c9x6d") pod "1b1a5eac-fd81-4595-a46b-dc0d79e09def" (UID: "1b1a5eac-fd81-4595-a46b-dc0d79e09def"). InnerVolumeSpecName "kube-api-access-c9x6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.101989 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a178d2d1-8c2f-4da5-bad1-15edb77a9508-kube-api-access-kmbft" (OuterVolumeSpecName: "kube-api-access-kmbft") pod "a178d2d1-8c2f-4da5-bad1-15edb77a9508" (UID: "a178d2d1-8c2f-4da5-bad1-15edb77a9508"). InnerVolumeSpecName "kube-api-access-kmbft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.102964 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-kube-api-access-7nwqn" (OuterVolumeSpecName: "kube-api-access-7nwqn") pod "def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2" (UID: "def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2"). InnerVolumeSpecName "kube-api-access-7nwqn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.199494 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nwqn\" (UniqueName: \"kubernetes.io/projected/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-kube-api-access-7nwqn\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.199535 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpcb4\" (UniqueName: \"kubernetes.io/projected/b2698992-ed9a-4c83-9294-71ba289f83f9-kube-api-access-xpcb4\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.199552 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmbft\" (UniqueName: \"kubernetes.io/projected/a178d2d1-8c2f-4da5-bad1-15edb77a9508-kube-api-access-kmbft\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.199584 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6lwj\" (UniqueName: \"kubernetes.io/projected/43b36ea0-76e2-4c88-acb5-1009ac0c4383-kube-api-access-q6lwj\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.199603 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.199619 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9x6d\" (UniqueName: \"kubernetes.io/projected/1b1a5eac-fd81-4595-a46b-dc0d79e09def-kube-api-access-c9x6d\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.199633 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b1a5eac-fd81-4595-a46b-dc0d79e09def-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.199648 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2698992-ed9a-4c83-9294-71ba289f83f9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.257505 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-42ffw" event={"ID":"b2698992-ed9a-4c83-9294-71ba289f83f9","Type":"ContainerDied","Data":"0487f1d155386e94a6a79894779c2e52c74f46dbb40368ceb142cbeb7ef4bf88"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.257619 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0487f1d155386e94a6a79894779c2e52c74f46dbb40368ceb142cbeb7ef4bf88" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.257742 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-42ffw" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.262830 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-zxcn5" event={"ID":"43b36ea0-76e2-4c88-acb5-1009ac0c4383","Type":"ContainerDied","Data":"83151d7d5fb7443840332674da7ce5af7404ddec9e53f64226f8ce59d1db7762"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.263090 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83151d7d5fb7443840332674da7ce5af7404ddec9e53f64226f8ce59d1db7762" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.262842 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-zxcn5" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.264495 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-39a1-account-create-update-bjzkr" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.264503 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-39a1-account-create-update-bjzkr" event={"ID":"def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2","Type":"ContainerDied","Data":"f5ba474fd33fa20485c27fa1b7c473769b4bff4d1372ad19901df2e87cc8539a"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.264546 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5ba474fd33fa20485c27fa1b7c473769b4bff4d1372ad19901df2e87cc8539a" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.266095 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2086-account-create-update-6gl95" event={"ID":"8ea41621-2d8a-4f99-ad06-da34cadcca6a","Type":"ContainerDied","Data":"dd6421ee08cb7e254ef2ed395815e43061494e2fc7ca43dbfcad2df956e70683"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.266123 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd6421ee08cb7e254ef2ed395815e43061494e2fc7ca43dbfcad2df956e70683" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.266166 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2086-account-create-update-6gl95" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.272589 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3fcd-account-create-update-fp4gv" event={"ID":"a178d2d1-8c2f-4da5-bad1-15edb77a9508","Type":"ContainerDied","Data":"feb054c1ef878b2b961915b477c0cd7e2eeeb194f68f18fd18f8bcc49b9891b0"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.272637 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="feb054c1ef878b2b961915b477c0cd7e2eeeb194f68f18fd18f8bcc49b9891b0" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.272701 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3fcd-account-create-update-fp4gv" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.274048 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-b55h5" event={"ID":"03ca0239-e8f6-4461-a843-27423c41c4b3","Type":"ContainerDied","Data":"9ba1c7e99c1a307aef9dae44e17f46e5c6d69a08ae7c8c8a6ced51ef27eba695"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.274075 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ba1c7e99c1a307aef9dae44e17f46e5c6d69a08ae7c8c8a6ced51ef27eba695" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.274120 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-b55h5" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.277670 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9232-account-create-update-lrvzq" event={"ID":"7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7","Type":"ContainerDied","Data":"21de499bda3ddfdfb2aefda5b5d44695a04b4a7749c91537d7f7de3cb008a925"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.277722 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21de499bda3ddfdfb2aefda5b5d44695a04b4a7749c91537d7f7de3cb008a925" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.277809 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9232-account-create-update-lrvzq" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.284536 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qfd9g" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.284547 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qfd9g" event={"ID":"9380c6cb-987b-4813-b0fd-0fc45c0ddaaa","Type":"ContainerDied","Data":"1098939e6f5d1aba673351c2a8ab2c922173818e7f753336f102268ba6c59d2b"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.284672 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1098939e6f5d1aba673351c2a8ab2c922173818e7f753336f102268ba6c59d2b" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.286710 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d123-account-create-update-zdc47" event={"ID":"1b1a5eac-fd81-4595-a46b-dc0d79e09def","Type":"ContainerDied","Data":"0ed0854879bd1b6ed5d168d0d8d901dbf6c90d8188ea9839e3acea55fdfa94e0"} Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.286739 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d123-account-create-update-zdc47" Nov 29 07:08:13 crc kubenswrapper[4943]: I1129 07:08:13.286763 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ed0854879bd1b6ed5d168d0d8d901dbf6c90d8188ea9839e3acea55fdfa94e0" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.352174 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-skrww"] Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353034 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353047 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353067 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ea41621-2d8a-4f99-ad06-da34cadcca6a" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353075 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ea41621-2d8a-4f99-ad06-da34cadcca6a" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353085 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43b36ea0-76e2-4c88-acb5-1009ac0c4383" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353091 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="43b36ea0-76e2-4c88-acb5-1009ac0c4383" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353104 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353109 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353117 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9380c6cb-987b-4813-b0fd-0fc45c0ddaaa" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353123 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9380c6cb-987b-4813-b0fd-0fc45c0ddaaa" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353138 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03ca0239-e8f6-4461-a843-27423c41c4b3" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353144 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="03ca0239-e8f6-4461-a843-27423c41c4b3" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353154 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b1a5eac-fd81-4595-a46b-dc0d79e09def" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353160 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b1a5eac-fd81-4595-a46b-dc0d79e09def" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353170 4943 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b2698992-ed9a-4c83-9294-71ba289f83f9" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353176 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2698992-ed9a-4c83-9294-71ba289f83f9" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: E1129 07:08:16.353184 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a178d2d1-8c2f-4da5-bad1-15edb77a9508" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353189 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a178d2d1-8c2f-4da5-bad1-15edb77a9508" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353344 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b1a5eac-fd81-4595-a46b-dc0d79e09def" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353364 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2698992-ed9a-4c83-9294-71ba289f83f9" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353374 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353388 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ea41621-2d8a-4f99-ad06-da34cadcca6a" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353401 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a178d2d1-8c2f-4da5-bad1-15edb77a9508" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353412 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="43b36ea0-76e2-4c88-acb5-1009ac0c4383" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353423 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="9380c6cb-987b-4813-b0fd-0fc45c0ddaaa" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353437 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="03ca0239-e8f6-4461-a843-27423c41c4b3" containerName="mariadb-database-create" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.353449 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7" containerName="mariadb-account-create-update" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.358330 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.363543 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-52hmr" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.369490 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-skrww"] Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.372076 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.372604 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.373710 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.451475 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-config-data\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.451533 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fcjp\" (UniqueName: \"kubernetes.io/projected/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-kube-api-access-9fcjp\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.451838 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-combined-ca-bundle\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.552878 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-config-data\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.552936 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fcjp\" (UniqueName: \"kubernetes.io/projected/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-kube-api-access-9fcjp\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.552995 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-combined-ca-bundle\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.561097 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-combined-ca-bundle\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " 
pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.563873 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-config-data\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.570132 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fcjp\" (UniqueName: \"kubernetes.io/projected/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-kube-api-access-9fcjp\") pod \"keystone-db-sync-skrww\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:16 crc kubenswrapper[4943]: I1129 07:08:16.681994 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:17 crc kubenswrapper[4943]: I1129 07:08:17.182755 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-skrww"] Nov 29 07:08:17 crc kubenswrapper[4943]: I1129 07:08:17.318914 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-daf9-account-create-update-qq5jc" event={"ID":"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4","Type":"ContainerStarted","Data":"63769c0a9a18143ad328e5b983d7b53a2d6486270c45e1008e174b0d0e687f8f"} Nov 29 07:08:17 crc kubenswrapper[4943]: I1129 07:08:17.320723 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-skrww" event={"ID":"3462ac03-a13b-49cf-ab08-b8d7a089a7e5","Type":"ContainerStarted","Data":"29bdf2a190072d6eb4d34e6db45ef0501bd5fe9fa9f0da35b1cb8817fc7bbf46"} Nov 29 07:08:17 crc kubenswrapper[4943]: I1129 07:08:17.322648 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-qhcwg" event={"ID":"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb","Type":"ContainerStarted","Data":"199414f3d820fc66a2dd9b8ac3304ebe754a84e7836748578081aefa1fcb1c2f"} Nov 29 07:08:17 crc kubenswrapper[4943]: I1129 07:08:17.344667 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-daf9-account-create-update-qq5jc" podStartSLOduration=6.34464162 podStartE2EDuration="6.34464162s" podCreationTimestamp="2025-11-29 07:08:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:17.33860634 +0000 UTC m=+2072.268695103" watchObservedRunningTime="2025-11-29 07:08:17.34464162 +0000 UTC m=+2072.274730373" Nov 29 07:08:17 crc kubenswrapper[4943]: I1129 07:08:17.363069 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-qhcwg" podStartSLOduration=6.363044416 podStartE2EDuration="6.363044416s" podCreationTimestamp="2025-11-29 07:08:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:17.358115873 +0000 UTC m=+2072.288204636" watchObservedRunningTime="2025-11-29 07:08:17.363044416 +0000 UTC m=+2072.293133169" Nov 29 07:08:18 crc kubenswrapper[4943]: I1129 07:08:18.331406 4943 generic.go:334] "Generic (PLEG): container finished" podID="ee3e2ff9-9a6e-4d2e-a487-6574ab627de4" containerID="63769c0a9a18143ad328e5b983d7b53a2d6486270c45e1008e174b0d0e687f8f" exitCode=0 Nov 29 07:08:18 crc kubenswrapper[4943]: I1129 07:08:18.331508 4943 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/glance-daf9-account-create-update-qq5jc" event={"ID":"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4","Type":"ContainerDied","Data":"63769c0a9a18143ad328e5b983d7b53a2d6486270c45e1008e174b0d0e687f8f"} Nov 29 07:08:18 crc kubenswrapper[4943]: I1129 07:08:18.334692 4943 generic.go:334] "Generic (PLEG): container finished" podID="08053aab-1c5b-41a7-bd0f-a51e91e7bcbb" containerID="199414f3d820fc66a2dd9b8ac3304ebe754a84e7836748578081aefa1fcb1c2f" exitCode=0 Nov 29 07:08:18 crc kubenswrapper[4943]: I1129 07:08:18.334747 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-qhcwg" event={"ID":"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb","Type":"ContainerDied","Data":"199414f3d820fc66a2dd9b8ac3304ebe754a84e7836748578081aefa1fcb1c2f"} Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.303051 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.306906 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.369491 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-qhcwg" event={"ID":"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb","Type":"ContainerDied","Data":"79b5e696c6fb2d7285455c6dbd12d74d0ae5080ed1331ef0f6c29f1022664845"} Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.369547 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79b5e696c6fb2d7285455c6dbd12d74d0ae5080ed1331ef0f6c29f1022664845" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.369629 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-qhcwg" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.371331 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-daf9-account-create-update-qq5jc" event={"ID":"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4","Type":"ContainerDied","Data":"b15952524581bb2db717a615c0a3986baf76c1821546f44558ad436de2525b0b"} Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.371360 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b15952524581bb2db717a615c0a3986baf76c1821546f44558ad436de2525b0b" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.371379 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-daf9-account-create-update-qq5jc" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.458283 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-operator-scripts\") pod \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\" (UID: \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\") " Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.458349 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-operator-scripts\") pod \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\" (UID: \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\") " Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.458402 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtck4\" (UniqueName: \"kubernetes.io/projected/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-kube-api-access-rtck4\") pod \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\" (UID: \"08053aab-1c5b-41a7-bd0f-a51e91e7bcbb\") " Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.458590 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f65bp\" (UniqueName: \"kubernetes.io/projected/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-kube-api-access-f65bp\") pod \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\" (UID: \"ee3e2ff9-9a6e-4d2e-a487-6574ab627de4\") " Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.459679 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "08053aab-1c5b-41a7-bd0f-a51e91e7bcbb" (UID: "08053aab-1c5b-41a7-bd0f-a51e91e7bcbb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.460724 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ee3e2ff9-9a6e-4d2e-a487-6574ab627de4" (UID: "ee3e2ff9-9a6e-4d2e-a487-6574ab627de4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.463413 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-kube-api-access-f65bp" (OuterVolumeSpecName: "kube-api-access-f65bp") pod "ee3e2ff9-9a6e-4d2e-a487-6574ab627de4" (UID: "ee3e2ff9-9a6e-4d2e-a487-6574ab627de4"). InnerVolumeSpecName "kube-api-access-f65bp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.463450 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-kube-api-access-rtck4" (OuterVolumeSpecName: "kube-api-access-rtck4") pod "08053aab-1c5b-41a7-bd0f-a51e91e7bcbb" (UID: "08053aab-1c5b-41a7-bd0f-a51e91e7bcbb"). InnerVolumeSpecName "kube-api-access-rtck4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.560678 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f65bp\" (UniqueName: \"kubernetes.io/projected/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-kube-api-access-f65bp\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.561351 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.561400 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:22 crc kubenswrapper[4943]: I1129 07:08:22.561415 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rtck4\" (UniqueName: \"kubernetes.io/projected/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb-kube-api-access-rtck4\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:23 crc kubenswrapper[4943]: I1129 07:08:23.382510 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-skrww" event={"ID":"3462ac03-a13b-49cf-ab08-b8d7a089a7e5","Type":"ContainerStarted","Data":"7a32de18e5615145f0c31b1d2ae3110319af8d1f6916b90ab0c68f221c990089"} Nov 29 07:08:23 crc kubenswrapper[4943]: I1129 07:08:23.406099 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-skrww" podStartSLOduration=2.47752168 podStartE2EDuration="7.406079702s" podCreationTimestamp="2025-11-29 07:08:16 +0000 UTC" firstStartedPulling="2025-11-29 07:08:17.193030576 +0000 UTC m=+2072.123119329" lastFinishedPulling="2025-11-29 07:08:22.121559117 +0000 UTC m=+2077.051677351" observedRunningTime="2025-11-29 07:08:23.39630323 +0000 UTC m=+2078.326392003" watchObservedRunningTime="2025-11-29 07:08:23.406079702 +0000 UTC m=+2078.336168455" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.525897 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-t85kr"] Nov 29 07:08:26 crc kubenswrapper[4943]: E1129 07:08:26.526630 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee3e2ff9-9a6e-4d2e-a487-6574ab627de4" containerName="mariadb-account-create-update" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.526652 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee3e2ff9-9a6e-4d2e-a487-6574ab627de4" containerName="mariadb-account-create-update" Nov 29 07:08:26 crc kubenswrapper[4943]: E1129 07:08:26.526679 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08053aab-1c5b-41a7-bd0f-a51e91e7bcbb" containerName="mariadb-database-create" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.526690 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="08053aab-1c5b-41a7-bd0f-a51e91e7bcbb" containerName="mariadb-database-create" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.526894 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="08053aab-1c5b-41a7-bd0f-a51e91e7bcbb" containerName="mariadb-database-create" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.526919 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee3e2ff9-9a6e-4d2e-a487-6574ab627de4" containerName="mariadb-account-create-update" Nov 29 07:08:26 crc kubenswrapper[4943]: 
I1129 07:08:26.527615 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.535647 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-t85kr"] Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.540111 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.540479 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-kjww4" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.638021 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-combined-ca-bundle\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.638148 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-config-data\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.638187 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6d7x\" (UniqueName: \"kubernetes.io/projected/6e6b4461-55fa-4092-b3c4-bc414ea16f70-kube-api-access-t6d7x\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.638374 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-db-sync-config-data\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.740475 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-db-sync-config-data\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.740537 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-combined-ca-bundle\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.740714 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-config-data\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.740750 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6d7x\" (UniqueName: 
\"kubernetes.io/projected/6e6b4461-55fa-4092-b3c4-bc414ea16f70-kube-api-access-t6d7x\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.747511 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-db-sync-config-data\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.747810 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-combined-ca-bundle\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.752065 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-config-data\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.758657 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6d7x\" (UniqueName: \"kubernetes.io/projected/6e6b4461-55fa-4092-b3c4-bc414ea16f70-kube-api-access-t6d7x\") pod \"glance-db-sync-t85kr\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:26 crc kubenswrapper[4943]: I1129 07:08:26.867974 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-t85kr" Nov 29 07:08:27 crc kubenswrapper[4943]: I1129 07:08:27.428435 4943 generic.go:334] "Generic (PLEG): container finished" podID="3462ac03-a13b-49cf-ab08-b8d7a089a7e5" containerID="7a32de18e5615145f0c31b1d2ae3110319af8d1f6916b90ab0c68f221c990089" exitCode=0 Nov 29 07:08:27 crc kubenswrapper[4943]: I1129 07:08:27.428725 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-skrww" event={"ID":"3462ac03-a13b-49cf-ab08-b8d7a089a7e5","Type":"ContainerDied","Data":"7a32de18e5615145f0c31b1d2ae3110319af8d1f6916b90ab0c68f221c990089"} Nov 29 07:08:27 crc kubenswrapper[4943]: I1129 07:08:27.520103 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-t85kr"] Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.442978 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-t85kr" event={"ID":"6e6b4461-55fa-4092-b3c4-bc414ea16f70","Type":"ContainerStarted","Data":"40f248b26805b5734d88fb3101bd5496c166b9674becd15aa395c7c682cf2470"} Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.757478 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.876944 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-combined-ca-bundle\") pod \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.877255 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fcjp\" (UniqueName: \"kubernetes.io/projected/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-kube-api-access-9fcjp\") pod \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.877344 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-config-data\") pod \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\" (UID: \"3462ac03-a13b-49cf-ab08-b8d7a089a7e5\") " Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.884287 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-kube-api-access-9fcjp" (OuterVolumeSpecName: "kube-api-access-9fcjp") pod "3462ac03-a13b-49cf-ab08-b8d7a089a7e5" (UID: "3462ac03-a13b-49cf-ab08-b8d7a089a7e5"). InnerVolumeSpecName "kube-api-access-9fcjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.909237 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3462ac03-a13b-49cf-ab08-b8d7a089a7e5" (UID: "3462ac03-a13b-49cf-ab08-b8d7a089a7e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.922965 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-config-data" (OuterVolumeSpecName: "config-data") pod "3462ac03-a13b-49cf-ab08-b8d7a089a7e5" (UID: "3462ac03-a13b-49cf-ab08-b8d7a089a7e5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.979283 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fcjp\" (UniqueName: \"kubernetes.io/projected/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-kube-api-access-9fcjp\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.979334 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:28 crc kubenswrapper[4943]: I1129 07:08:28.979346 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3462ac03-a13b-49cf-ab08-b8d7a089a7e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.451242 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-skrww" event={"ID":"3462ac03-a13b-49cf-ab08-b8d7a089a7e5","Type":"ContainerDied","Data":"29bdf2a190072d6eb4d34e6db45ef0501bd5fe9fa9f0da35b1cb8817fc7bbf46"} Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.451508 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29bdf2a190072d6eb4d34e6db45ef0501bd5fe9fa9f0da35b1cb8817fc7bbf46" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.451301 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-skrww" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.680508 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75bb4695fc-pw9cs"] Nov 29 07:08:29 crc kubenswrapper[4943]: E1129 07:08:29.680878 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3462ac03-a13b-49cf-ab08-b8d7a089a7e5" containerName="keystone-db-sync" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.680889 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3462ac03-a13b-49cf-ab08-b8d7a089a7e5" containerName="keystone-db-sync" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.681053 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3462ac03-a13b-49cf-ab08-b8d7a089a7e5" containerName="keystone-db-sync" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.681848 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.692341 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75bb4695fc-pw9cs"] Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.753474 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-ddbgb"] Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.773471 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-ddbgb"] Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.773600 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.787271 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-52hmr" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.787409 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.787744 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.787280 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.788122 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.791965 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-dns-svc\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.792033 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-nb\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.792052 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-sb\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.792074 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-config\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.792110 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6v54\" (UniqueName: \"kubernetes.io/projected/af13998a-d0fc-4e42-a825-034ea605eb54-kube-api-access-j6v54\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.886064 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.888052 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.896836 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897056 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-scripts\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897127 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-dns-svc\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897176 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-nb\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897196 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-sb\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897222 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-config\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897250 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-config-data\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897270 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6v54\" (UniqueName: \"kubernetes.io/projected/af13998a-d0fc-4e42-a825-034ea605eb54-kube-api-access-j6v54\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897288 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pljrr\" (UniqueName: \"kubernetes.io/projected/89fbe621-8487-46ff-b0cd-091564d4f56b-kube-api-access-pljrr\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897308 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-fernet-keys\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897339 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-credential-keys\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897363 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-combined-ca-bundle\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.897589 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.898258 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-dns-svc\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.898749 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-nb\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.899140 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-sb\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.899753 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-config\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.900277 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:08:29 crc kubenswrapper[4943]: I1129 07:08:29.926740 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6v54\" (UniqueName: \"kubernetes.io/projected/af13998a-d0fc-4e42-a825-034ea605eb54-kube-api-access-j6v54\") pod \"dnsmasq-dns-75bb4695fc-pw9cs\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") " pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.000626 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-config-data\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " 
pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.000693 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pljrr\" (UniqueName: \"kubernetes.io/projected/89fbe621-8487-46ff-b0cd-091564d4f56b-kube-api-access-pljrr\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.000727 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-fernet-keys\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.000788 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-log-httpd\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.000829 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-credential-keys\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.000878 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-combined-ca-bundle\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.000920 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-scripts\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.000993 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-run-httpd\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.001044 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-scripts\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.001093 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.001173 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-66fh2\" (UniqueName: \"kubernetes.io/projected/b3e0df45-d658-47c8-8322-48bb269849dd-kube-api-access-66fh2\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.001197 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.001221 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-config-data\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.011720 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-fernet-keys\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.013933 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-combined-ca-bundle\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.017027 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-credential-keys\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.019340 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-config-data\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.039476 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-scripts\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.039630 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-dnmc7"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.040868 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.041802 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.046145 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-v9j9p" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.046337 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.049481 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pljrr\" (UniqueName: \"kubernetes.io/projected/89fbe621-8487-46ff-b0cd-091564d4f56b-kube-api-access-pljrr\") pod \"keystone-bootstrap-ddbgb\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") " pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.070287 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-6wspw"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.071402 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.075655 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.075919 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.076056 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-fnq66" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.102583 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66fh2\" (UniqueName: \"kubernetes.io/projected/b3e0df45-d658-47c8-8322-48bb269849dd-kube-api-access-66fh2\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.102620 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.102644 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-config-data\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.102702 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-log-httpd\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.102754 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-run-httpd\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.102779 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-scripts\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.102799 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.105918 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-run-httpd\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.106248 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-log-httpd\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.109448 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.111068 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.111587 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-scripts\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.114084 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-dnmc7"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.123472 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-ddbgb" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.127123 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-config-data\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.132503 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-6wspw"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.145620 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66fh2\" (UniqueName: \"kubernetes.io/projected/b3e0df45-d658-47c8-8322-48bb269849dd-kube-api-access-66fh2\") pod \"ceilometer-0\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.146611 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-m6jf9"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.147616 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.153109 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-hs7mc" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.153283 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.153385 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.160640 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-m6jf9"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.205618 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-99dmd"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.206995 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-config\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.207044 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-99dmd" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.207066 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-db-sync-config-data\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.207102 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ffx2\" (UniqueName: \"kubernetes.io/projected/00a997b6-77fe-4644-8034-6a35b7518421-kube-api-access-4ffx2\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.207149 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-combined-ca-bundle\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.207215 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-combined-ca-bundle\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.207256 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfnnt\" (UniqueName: \"kubernetes.io/projected/157d6f54-3436-40f4-b2ef-c16933cbbc72-kube-api-access-bfnnt\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.215148 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.215280 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-5xsnq" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.215953 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.226125 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-99dmd"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.256933 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.271372 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bb4695fc-pw9cs"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.295028 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-745b9ddc8c-gq7tj"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.314880 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjm8q\" (UniqueName: \"kubernetes.io/projected/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-kube-api-access-mjm8q\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.315050 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-config\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.315144 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-db-sync-config-data\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.315252 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-db-sync-config-data\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.315350 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ffx2\" (UniqueName: \"kubernetes.io/projected/00a997b6-77fe-4644-8034-6a35b7518421-kube-api-access-4ffx2\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.315485 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-combined-ca-bundle\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.316209 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-combined-ca-bundle\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.316321 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-combined-ca-bundle\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.316450 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-logs\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.316663 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfnnt\" (UniqueName: \"kubernetes.io/projected/157d6f54-3436-40f4-b2ef-c16933cbbc72-kube-api-access-bfnnt\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.316805 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-scripts\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.316896 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3301c2a-4575-4e54-a396-d31fb9c5e427-etc-machine-id\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.316975 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-config-data\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.317124 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-config-data\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.317220 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ch68\" (UniqueName: \"kubernetes.io/projected/a3301c2a-4575-4e54-a396-d31fb9c5e427-kube-api-access-6ch68\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.317308 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-combined-ca-bundle\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.317451 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-scripts\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.323700 4943 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-combined-ca-bundle\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.330397 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-db-sync-config-data\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.340080 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.343700 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-combined-ca-bundle\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.349328 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-config\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.358909 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ffx2\" (UniqueName: \"kubernetes.io/projected/00a997b6-77fe-4644-8034-6a35b7518421-kube-api-access-4ffx2\") pod \"barbican-db-sync-dnmc7\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.365184 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfnnt\" (UniqueName: \"kubernetes.io/projected/157d6f54-3436-40f4-b2ef-c16933cbbc72-kube-api-access-bfnnt\") pod \"neutron-db-sync-6wspw\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " pod="openstack/neutron-db-sync-6wspw" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.370473 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-745b9ddc8c-gq7tj"] Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.426774 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-logs\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427234 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-scripts\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427272 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3301c2a-4575-4e54-a396-d31fb9c5e427-etc-machine-id\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9" Nov 29 
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427297 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-config-data\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427391 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-config-data\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427431 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ch68\" (UniqueName: \"kubernetes.io/projected/a3301c2a-4575-4e54-a396-d31fb9c5e427-kube-api-access-6ch68\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427463 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-combined-ca-bundle\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427502 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-scripts\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427578 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjm8q\" (UniqueName: \"kubernetes.io/projected/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-kube-api-access-mjm8q\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427618 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-db-sync-config-data\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.427815 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-combined-ca-bundle\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.428691 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3301c2a-4575-4e54-a396-d31fb9c5e427-etc-machine-id\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.431380 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-logs\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.434914 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-scripts\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.437100 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-db-sync-config-data\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.437615 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-config-data\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.440290 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-combined-ca-bundle\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.442007 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-config-data\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.444935 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-combined-ca-bundle\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.445233 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-scripts\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.459337 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ch68\" (UniqueName: \"kubernetes.io/projected/a3301c2a-4575-4e54-a396-d31fb9c5e427-kube-api-access-6ch68\") pod \"cinder-db-sync-m6jf9\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.469032 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjm8q\" (UniqueName: \"kubernetes.io/projected/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-kube-api-access-mjm8q\") pod \"placement-db-sync-99dmd\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.529217 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-nb\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.529384 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-dns-svc\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.529703 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-config\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.529899 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-sb\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.529995 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xks9w\" (UniqueName: \"kubernetes.io/projected/9d6b7e59-a19e-40ed-92f7-777faf6041c1-kube-api-access-xks9w\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.534173 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-dnmc7"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.550294 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-6wspw"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.569924 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-m6jf9"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.594815 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-99dmd"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.595918 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bb4695fc-pw9cs"]
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.632334 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-dns-svc\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.632626 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-config\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.632703 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-sb\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.632742 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xks9w\" (UniqueName: \"kubernetes.io/projected/9d6b7e59-a19e-40ed-92f7-777faf6041c1-kube-api-access-xks9w\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.632792 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-nb\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.633975 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-config\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.633975 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-nb\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.634657 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-sb\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.635018 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-dns-svc\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.652312 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xks9w\" (UniqueName: \"kubernetes.io/projected/9d6b7e59-a19e-40ed-92f7-777faf6041c1-kube-api-access-xks9w\") pod \"dnsmasq-dns-745b9ddc8c-gq7tj\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.659758 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-ddbgb"]
Nov 29 07:08:30 crc kubenswrapper[4943]: I1129 07:08:30.699736 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj"
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:31.000210 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:31.478665 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3e0df45-d658-47c8-8322-48bb269849dd","Type":"ContainerStarted","Data":"54fdd73b0f34ed5cab2ae0bf36373515053ea8a6c0e8325cc44f6926420be5f3"}
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:31.484417 4943 generic.go:334] "Generic (PLEG): container finished" podID="af13998a-d0fc-4e42-a825-034ea605eb54" containerID="4e9c3667e2049cdd260f05d4320e23182d55a7c4b7cb8c219a6567ee00a7179a" exitCode=0
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:31.484491 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" event={"ID":"af13998a-d0fc-4e42-a825-034ea605eb54","Type":"ContainerDied","Data":"4e9c3667e2049cdd260f05d4320e23182d55a7c4b7cb8c219a6567ee00a7179a"}
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:31.484522 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" event={"ID":"af13998a-d0fc-4e42-a825-034ea605eb54","Type":"ContainerStarted","Data":"e0986b70a06d96c7065eb805ad899bfde47ac71ef7abe87355019d6a2443171d"}
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:31.491803 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ddbgb" event={"ID":"89fbe621-8487-46ff-b0cd-091564d4f56b","Type":"ContainerStarted","Data":"da186a175914ca6eb05a7638c8b22082f72526a28c543d11c2bb45498468f6b5"}
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:31.491846 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ddbgb" event={"ID":"89fbe621-8487-46ff-b0cd-091564d4f56b","Type":"ContainerStarted","Data":"47fe4988fc014df726c961a43f2f78f678edb0ffd89c30606cb30aff26267cb9"}
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:31.541025 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-ddbgb" podStartSLOduration=2.541005603 podStartE2EDuration="2.541005603s" podCreationTimestamp="2025-11-29 07:08:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:31.528221377 +0000 UTC m=+2086.458310130" watchObservedRunningTime="2025-11-29 07:08:31.541005603 +0000 UTC m=+2086.471094356"
Nov 29 07:08:36 crc kubenswrapper[4943]: I1129 07:08:36.998378 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 07:08:37 crc kubenswrapper[4943]: I1129 07:08:37.281797 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-745b9ddc8c-gq7tj"]
Nov 29 07:08:37 crc kubenswrapper[4943]: I1129 07:08:37.303891 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-6wspw"]
Nov 29 07:08:37 crc kubenswrapper[4943]: I1129 07:08:37.312804 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-m6jf9"]
Nov 29 07:08:37 crc kubenswrapper[4943]: I1129 07:08:37.322700 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-dnmc7"]
Nov 29 07:08:37 crc kubenswrapper[4943]: I1129 07:08:37.503908 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-99dmd"]
Nov 29 07:08:38 crc kubenswrapper[4943]: I1129 07:08:38.550961 4943 generic.go:334] "Generic (PLEG): container finished" podID="89fbe621-8487-46ff-b0cd-091564d4f56b" containerID="da186a175914ca6eb05a7638c8b22082f72526a28c543d11c2bb45498468f6b5" exitCode=0
Nov 29 07:08:38 crc kubenswrapper[4943]: I1129 07:08:38.551054 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ddbgb" event={"ID":"89fbe621-8487-46ff-b0cd-091564d4f56b","Type":"ContainerDied","Data":"da186a175914ca6eb05a7638c8b22082f72526a28c543d11c2bb45498468f6b5"}
Nov 29 07:08:51 crc kubenswrapper[4943]: E1129 07:08:51.710789 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified"
Nov 29 07:08:51 crc kubenswrapper[4943]: E1129 07:08:51.711524 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t6d7x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-t85kr_openstack(6e6b4461-55fa-4092-b3c4-bc414ea16f70): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:08:51 crc kubenswrapper[4943]: E1129 07:08:51.712806 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-t85kr" podUID="6e6b4461-55fa-4092-b3c4-bc414ea16f70"
Nov 29 07:08:51 crc kubenswrapper[4943]: I1129 07:08:51.765782 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" event={"ID":"9d6b7e59-a19e-40ed-92f7-777faf6041c1","Type":"ContainerStarted","Data":"f9e619dca346ce16d896e63d5df090151301c2174701dd6413ee76b99fdbafde"}
Nov 29 07:08:51 crc kubenswrapper[4943]: E1129 07:08:51.770226 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-t85kr" podUID="6e6b4461-55fa-4092-b3c4-bc414ea16f70"
Nov 29 07:08:54 crc kubenswrapper[4943]: W1129 07:08:54.067883 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod157d6f54_3436_40f4_b2ef_c16933cbbc72.slice/crio-6df441416f30a79da6e5587ae80b2e0c45097addf8e92af261c8da8516fbfa8e WatchSource:0}: Error finding container 6df441416f30a79da6e5587ae80b2e0c45097addf8e92af261c8da8516fbfa8e: Status 404 returned error can't find the container with id 6df441416f30a79da6e5587ae80b2e0c45097addf8e92af261c8da8516fbfa8e
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.190661 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs"
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.195748 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-ddbgb"
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360318 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-nb\") pod \"af13998a-d0fc-4e42-a825-034ea605eb54\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360390 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-combined-ca-bundle\") pod \"89fbe621-8487-46ff-b0cd-091564d4f56b\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360492 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-sb\") pod \"af13998a-d0fc-4e42-a825-034ea605eb54\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360523 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-dns-svc\") pod \"af13998a-d0fc-4e42-a825-034ea605eb54\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360546 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-fernet-keys\") pod \"89fbe621-8487-46ff-b0cd-091564d4f56b\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360624 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-config-data\") pod \"89fbe621-8487-46ff-b0cd-091564d4f56b\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360667 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-config\") pod \"af13998a-d0fc-4e42-a825-034ea605eb54\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360701 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pljrr\" (UniqueName: \"kubernetes.io/projected/89fbe621-8487-46ff-b0cd-091564d4f56b-kube-api-access-pljrr\") pod \"89fbe621-8487-46ff-b0cd-091564d4f56b\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360790 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-credential-keys\") pod \"89fbe621-8487-46ff-b0cd-091564d4f56b\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360818 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-scripts\") pod \"89fbe621-8487-46ff-b0cd-091564d4f56b\" (UID: \"89fbe621-8487-46ff-b0cd-091564d4f56b\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.360844 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6v54\" (UniqueName: \"kubernetes.io/projected/af13998a-d0fc-4e42-a825-034ea605eb54-kube-api-access-j6v54\") pod \"af13998a-d0fc-4e42-a825-034ea605eb54\" (UID: \"af13998a-d0fc-4e42-a825-034ea605eb54\") "
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.367609 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "89fbe621-8487-46ff-b0cd-091564d4f56b" (UID: "89fbe621-8487-46ff-b0cd-091564d4f56b"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.368514 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af13998a-d0fc-4e42-a825-034ea605eb54-kube-api-access-j6v54" (OuterVolumeSpecName: "kube-api-access-j6v54") pod "af13998a-d0fc-4e42-a825-034ea605eb54" (UID: "af13998a-d0fc-4e42-a825-034ea605eb54"). InnerVolumeSpecName "kube-api-access-j6v54". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.371513 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-scripts" (OuterVolumeSpecName: "scripts") pod "89fbe621-8487-46ff-b0cd-091564d4f56b" (UID: "89fbe621-8487-46ff-b0cd-091564d4f56b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.373064 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89fbe621-8487-46ff-b0cd-091564d4f56b-kube-api-access-pljrr" (OuterVolumeSpecName: "kube-api-access-pljrr") pod "89fbe621-8487-46ff-b0cd-091564d4f56b" (UID: "89fbe621-8487-46ff-b0cd-091564d4f56b"). InnerVolumeSpecName "kube-api-access-pljrr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.374035 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "89fbe621-8487-46ff-b0cd-091564d4f56b" (UID: "89fbe621-8487-46ff-b0cd-091564d4f56b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.395925 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-config" (OuterVolumeSpecName: "config") pod "af13998a-d0fc-4e42-a825-034ea605eb54" (UID: "af13998a-d0fc-4e42-a825-034ea605eb54"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.396918 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-config-data" (OuterVolumeSpecName: "config-data") pod "89fbe621-8487-46ff-b0cd-091564d4f56b" (UID: "89fbe621-8487-46ff-b0cd-091564d4f56b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.396998 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "af13998a-d0fc-4e42-a825-034ea605eb54" (UID: "af13998a-d0fc-4e42-a825-034ea605eb54"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.399617 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "af13998a-d0fc-4e42-a825-034ea605eb54" (UID: "af13998a-d0fc-4e42-a825-034ea605eb54"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.406113 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "89fbe621-8487-46ff-b0cd-091564d4f56b" (UID: "89fbe621-8487-46ff-b0cd-091564d4f56b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.420900 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "af13998a-d0fc-4e42-a825-034ea605eb54" (UID: "af13998a-d0fc-4e42-a825-034ea605eb54"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463368 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463415 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463429 4943 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463440 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463449 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-config\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463460 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pljrr\" (UniqueName: \"kubernetes.io/projected/89fbe621-8487-46ff-b0cd-091564d4f56b-kube-api-access-pljrr\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463471 4943 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-credential-keys\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463479 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463487 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6v54\" (UniqueName: \"kubernetes.io/projected/af13998a-d0fc-4e42-a825-034ea605eb54-kube-api-access-j6v54\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463495 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af13998a-d0fc-4e42-a825-034ea605eb54-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.463503 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89fbe621-8487-46ff-b0cd-091564d4f56b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:08:54 crc kubenswrapper[4943]: E1129 07:08:54.792727 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Nov 29 07:08:54 crc kubenswrapper[4943]: E1129 07:08:54.792933 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n75h658h65fhd8h5bdhbbh67fh6ch66h555h64fhfbh654h667h54bh97h4h88h8chfch546h5bh664hc4hf9h75h556h684hc8h68chch5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-66fh2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(b3e0df45-d658-47c8-8322-48bb269849dd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.845333 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-99dmd" event={"ID":"923cc7a6-0fd5-44c0-a568-88eeccc8f31e","Type":"ContainerStarted","Data":"1fde00aa667be0c8379a2840df2b9d358ceda5bfad38acd879d2f73c74831373"}
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.890964 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs" event={"ID":"af13998a-d0fc-4e42-a825-034ea605eb54","Type":"ContainerDied","Data":"e0986b70a06d96c7065eb805ad899bfde47ac71ef7abe87355019d6a2443171d"}
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.891280 4943 scope.go:117] "RemoveContainer" containerID="4e9c3667e2049cdd260f05d4320e23182d55a7c4b7cb8c219a6567ee00a7179a"
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.891475 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bb4695fc-pw9cs"
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.906442 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ddbgb" event={"ID":"89fbe621-8487-46ff-b0cd-091564d4f56b","Type":"ContainerDied","Data":"47fe4988fc014df726c961a43f2f78f678edb0ffd89c30606cb30aff26267cb9"}
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.906514 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47fe4988fc014df726c961a43f2f78f678edb0ffd89c30606cb30aff26267cb9"
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.906679 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-ddbgb"
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.924189 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-m6jf9" event={"ID":"a3301c2a-4575-4e54-a396-d31fb9c5e427","Type":"ContainerStarted","Data":"326241d5dcc86bb2c5784c1f4a09490322d2d4372fa7311c65b3685e820ffb8b"}
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.940462 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-6wspw" event={"ID":"157d6f54-3436-40f4-b2ef-c16933cbbc72","Type":"ContainerStarted","Data":"6df441416f30a79da6e5587ae80b2e0c45097addf8e92af261c8da8516fbfa8e"}
Nov 29 07:08:54 crc kubenswrapper[4943]: I1129 07:08:54.941955 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dnmc7" event={"ID":"00a997b6-77fe-4644-8034-6a35b7518421","Type":"ContainerStarted","Data":"f2ecf5d0add55eb21f8f18cf4da9cc008de2c94661b8cd4acc84529ca7122af9"}
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.036776 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bb4695fc-pw9cs"]
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.045502 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75bb4695fc-pw9cs"]
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.296008 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-ddbgb"]
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.308311 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-ddbgb"]
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.345986 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89fbe621-8487-46ff-b0cd-091564d4f56b" path="/var/lib/kubelet/pods/89fbe621-8487-46ff-b0cd-091564d4f56b/volumes"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.347755 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af13998a-d0fc-4e42-a825-034ea605eb54" path="/var/lib/kubelet/pods/af13998a-d0fc-4e42-a825-034ea605eb54/volumes"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.400245 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-mv6c9"]
Nov 29 07:08:55 crc kubenswrapper[4943]: E1129 07:08:55.400613 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af13998a-d0fc-4e42-a825-034ea605eb54" containerName="init"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.400629 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="af13998a-d0fc-4e42-a825-034ea605eb54" containerName="init"
Nov 29 07:08:55 crc kubenswrapper[4943]: E1129 07:08:55.400650 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89fbe621-8487-46ff-b0cd-091564d4f56b" containerName="keystone-bootstrap"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.400656 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="89fbe621-8487-46ff-b0cd-091564d4f56b" containerName="keystone-bootstrap"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.400845 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="af13998a-d0fc-4e42-a825-034ea605eb54" containerName="init"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.400865 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="89fbe621-8487-46ff-b0cd-091564d4f56b" containerName="keystone-bootstrap"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.402710 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.406354 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.406488 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.406687 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.406888 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.410771 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-52hmr"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.413633 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-mv6c9"]
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.585888 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-scripts\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.586033 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-fernet-keys\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.586083 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xtbx\" (UniqueName: \"kubernetes.io/projected/3660253d-7204-4091-8cb2-589517e751e7-kube-api-access-6xtbx\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.586261 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-combined-ca-bundle\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.586362 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-credential-keys\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.586385 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-config-data\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.687902 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-combined-ca-bundle\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.687999 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-credential-keys\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.688030 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-config-data\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.688100 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-scripts\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.688154 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-fernet-keys\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.688191 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xtbx\" (UniqueName: \"kubernetes.io/projected/3660253d-7204-4091-8cb2-589517e751e7-kube-api-access-6xtbx\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.694829 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-config-data\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.695367 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-combined-ca-bundle\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.698812 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-credential-keys\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.699508 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-fernet-keys\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.710857 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-scripts\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.714408 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xtbx\" (UniqueName: \"kubernetes.io/projected/3660253d-7204-4091-8cb2-589517e751e7-kube-api-access-6xtbx\") pod \"keystone-bootstrap-mv6c9\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.730232 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mv6c9"
Nov 29 07:08:55 crc kubenswrapper[4943]: I1129 07:08:55.961873 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" event={"ID":"9d6b7e59-a19e-40ed-92f7-777faf6041c1","Type":"ContainerStarted","Data":"62abad4c67a25a37e880dfca5cbd646918fa8e0e136f4d5d885265a02207d876"}
Nov 29 07:08:56 crc kubenswrapper[4943]: I1129 07:08:56.249918 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-mv6c9"]
Nov 29 07:08:56 crc kubenswrapper[4943]: W1129 07:08:56.325459 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3660253d_7204_4091_8cb2_589517e751e7.slice/crio-84d63b93ea3ba8649e043b44b1568f7fdc8767f8de45d27d007c5aff870e9da1 WatchSource:0}: Error finding container 84d63b93ea3ba8649e043b44b1568f7fdc8767f8de45d27d007c5aff870e9da1: Status 404 returned error can't find the container with id 84d63b93ea3ba8649e043b44b1568f7fdc8767f8de45d27d007c5aff870e9da1
Nov 29 07:08:56 crc kubenswrapper[4943]: I1129 07:08:56.974050 4943 generic.go:334] "Generic (PLEG): container finished" podID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerID="62abad4c67a25a37e880dfca5cbd646918fa8e0e136f4d5d885265a02207d876" exitCode=0
Nov 29 07:08:56 crc kubenswrapper[4943]: I1129 07:08:56.974444 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" event={"ID":"9d6b7e59-a19e-40ed-92f7-777faf6041c1","Type":"ContainerDied","Data":"62abad4c67a25a37e880dfca5cbd646918fa8e0e136f4d5d885265a02207d876"}
Nov 29 07:08:56 crc kubenswrapper[4943]: I1129 07:08:56.976917 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-6wspw" event={"ID":"157d6f54-3436-40f4-b2ef-c16933cbbc72","Type":"ContainerStarted","Data":"4ffeada44816bc9d2238de419bbe56d443a99b305ef92427a376d41d563da45b"}
Nov 29 07:08:56 crc kubenswrapper[4943]: I1129 07:08:56.979924 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mv6c9" event={"ID":"3660253d-7204-4091-8cb2-589517e751e7","Type":"ContainerStarted","Data":"17a43dedef1afa255466b91a097655f04ac7c357e2551c023d070c28c8719c35"}
Nov 29 07:08:56 crc kubenswrapper[4943]: I1129 07:08:56.980017 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mv6c9" event={"ID":"3660253d-7204-4091-8cb2-589517e751e7","Type":"ContainerStarted","Data":"84d63b93ea3ba8649e043b44b1568f7fdc8767f8de45d27d007c5aff870e9da1"}
Nov 29 07:08:57 crc kubenswrapper[4943]: I1129 07:08:57.046053 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-6wspw" podStartSLOduration=28.046028926 podStartE2EDuration="28.046028926s" podCreationTimestamp="2025-11-29 07:08:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:57.036535291 +0000 UTC m=+2111.966624064" watchObservedRunningTime="2025-11-29 07:08:57.046028926 +0000 UTC m=+2111.976117679"
Nov 29 07:08:57 crc kubenswrapper[4943]: I1129 07:08:57.063970 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-mv6c9" podStartSLOduration=2.06394639 podStartE2EDuration="2.06394639s" podCreationTimestamp="2025-11-29 07:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC"
observedRunningTime="2025-11-29 07:08:57.063188101 +0000 UTC m=+2111.993276854" watchObservedRunningTime="2025-11-29 07:08:57.06394639 +0000 UTC m=+2111.994035153" Nov 29 07:08:57 crc kubenswrapper[4943]: I1129 07:08:57.994481 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" event={"ID":"9d6b7e59-a19e-40ed-92f7-777faf6041c1","Type":"ContainerStarted","Data":"15e64e0735b30046273a8fdc4062d174f393e16f5d2556c587d60525229e691b"} Nov 29 07:08:58 crc kubenswrapper[4943]: I1129 07:08:58.019330 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" podStartSLOduration=28.019314927 podStartE2EDuration="28.019314927s" podCreationTimestamp="2025-11-29 07:08:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:08:58.016721393 +0000 UTC m=+2112.946810166" watchObservedRunningTime="2025-11-29 07:08:58.019314927 +0000 UTC m=+2112.949403680" Nov 29 07:08:59 crc kubenswrapper[4943]: I1129 07:08:59.007616 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" Nov 29 07:09:05 crc kubenswrapper[4943]: I1129 07:09:05.701763 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" Nov 29 07:09:05 crc kubenswrapper[4943]: I1129 07:09:05.766042 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hbfzw"] Nov 29 07:09:05 crc kubenswrapper[4943]: I1129 07:09:05.766292 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="dnsmasq-dns" containerID="cri-o://22708a8daaa103d32550350f83eb33713ec48977461897c10ab2c1a34d1f3184" gracePeriod=10 Nov 29 07:09:07 crc kubenswrapper[4943]: I1129 07:09:07.118605 4943 generic.go:334] "Generic (PLEG): container finished" podID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerID="22708a8daaa103d32550350f83eb33713ec48977461897c10ab2c1a34d1f3184" exitCode=0 Nov 29 07:09:07 crc kubenswrapper[4943]: I1129 07:09:07.118659 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" event={"ID":"67d225ea-2f1a-4270-8668-6a6001bd7c8c","Type":"ContainerDied","Data":"22708a8daaa103d32550350f83eb33713ec48977461897c10ab2c1a34d1f3184"} Nov 29 07:09:10 crc kubenswrapper[4943]: I1129 07:09:10.374621 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 29 07:09:15 crc kubenswrapper[4943]: I1129 07:09:15.374205 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 29 07:09:20 crc kubenswrapper[4943]: I1129 07:09:20.374234 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 29 07:09:20 crc kubenswrapper[4943]: I1129 07:09:20.375113 4943 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:09:22 crc kubenswrapper[4943]: I1129 07:09:22.249081 4943 generic.go:334] "Generic (PLEG): container finished" podID="3660253d-7204-4091-8cb2-589517e751e7" containerID="17a43dedef1afa255466b91a097655f04ac7c357e2551c023d070c28c8719c35" exitCode=0 Nov 29 07:09:22 crc kubenswrapper[4943]: I1129 07:09:22.249172 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mv6c9" event={"ID":"3660253d-7204-4091-8cb2-589517e751e7","Type":"ContainerDied","Data":"17a43dedef1afa255466b91a097655f04ac7c357e2551c023d070c28c8719c35"} Nov 29 07:09:23 crc kubenswrapper[4943]: E1129 07:09:23.849192 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 29 07:09:23 crc kubenswrapper[4943]: E1129 07:09:23.849768 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4ffx2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-dnmc7_openstack(00a997b6-77fe-4644-8034-6a35b7518421): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:09:23 crc kubenswrapper[4943]: E1129 07:09:23.851009 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-dnmc7" podUID="00a997b6-77fe-4644-8034-6a35b7518421" Nov 29 07:09:24 crc kubenswrapper[4943]: E1129 07:09:24.267025 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-dnmc7" podUID="00a997b6-77fe-4644-8034-6a35b7518421" Nov 29 07:09:24 crc kubenswrapper[4943]: E1129 07:09:24.831800 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified" Nov 29 07:09:24 crc kubenswrapper[4943]: E1129 07:09:24.831996 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-notification-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n75h658h65fhd8h5bdhbbh67fh6ch66h555h64fhfbh654h667h54bh97h4h88h8chfch546h5bh664hc4hf9h75h556h684hc8h68chch5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-notification-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-66fh2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/notificationhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(b3e0df45-d658-47c8-8322-48bb269849dd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:09:24 crc kubenswrapper[4943]: I1129 07:09:24.887265 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.021605 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-dns-svc\") pod \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.021690 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-config\") pod \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.021762 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-sb\") pod \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.021838 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmlvb\" (UniqueName: \"kubernetes.io/projected/67d225ea-2f1a-4270-8668-6a6001bd7c8c-kube-api-access-cmlvb\") pod \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.021868 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-nb\") pod \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\" (UID: \"67d225ea-2f1a-4270-8668-6a6001bd7c8c\") " Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.032407 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67d225ea-2f1a-4270-8668-6a6001bd7c8c-kube-api-access-cmlvb" (OuterVolumeSpecName: "kube-api-access-cmlvb") pod "67d225ea-2f1a-4270-8668-6a6001bd7c8c" (UID: "67d225ea-2f1a-4270-8668-6a6001bd7c8c"). InnerVolumeSpecName "kube-api-access-cmlvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.063625 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "67d225ea-2f1a-4270-8668-6a6001bd7c8c" (UID: "67d225ea-2f1a-4270-8668-6a6001bd7c8c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.064806 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "67d225ea-2f1a-4270-8668-6a6001bd7c8c" (UID: "67d225ea-2f1a-4270-8668-6a6001bd7c8c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.068398 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "67d225ea-2f1a-4270-8668-6a6001bd7c8c" (UID: "67d225ea-2f1a-4270-8668-6a6001bd7c8c"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.080901 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-config" (OuterVolumeSpecName: "config") pod "67d225ea-2f1a-4270-8668-6a6001bd7c8c" (UID: "67d225ea-2f1a-4270-8668-6a6001bd7c8c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.125637 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.125675 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.125690 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.125703 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmlvb\" (UniqueName: \"kubernetes.io/projected/67d225ea-2f1a-4270-8668-6a6001bd7c8c-kube-api-access-cmlvb\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.125715 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67d225ea-2f1a-4270-8668-6a6001bd7c8c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.273910 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" event={"ID":"67d225ea-2f1a-4270-8668-6a6001bd7c8c","Type":"ContainerDied","Data":"341abfff15d9ccaa1fad314896acd2ef8eac5bbeb06ccfcc41f15d706076b69d"} Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.273971 4943 scope.go:117] "RemoveContainer" containerID="22708a8daaa103d32550350f83eb33713ec48977461897c10ab2c1a34d1f3184" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.273986 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-hbfzw" Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.309693 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hbfzw"] Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.315599 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hbfzw"] Nov 29 07:09:25 crc kubenswrapper[4943]: I1129 07:09:25.362459 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" path="/var/lib/kubelet/pods/67d225ea-2f1a-4270-8668-6a6001bd7c8c/volumes" Nov 29 07:09:29 crc kubenswrapper[4943]: I1129 07:09:29.853795 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-dt7vh" podUID="4ad8fae5-ebf3-406c-b971-f15b1978e82c" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:09:32 crc kubenswrapper[4943]: I1129 07:09:32.613449 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:09:32 crc kubenswrapper[4943]: I1129 07:09:32.613859 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.833714 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w9x9p"] Nov 29 07:09:38 crc kubenswrapper[4943]: E1129 07:09:38.835031 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="dnsmasq-dns" Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.835058 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="dnsmasq-dns" Nov 29 07:09:38 crc kubenswrapper[4943]: E1129 07:09:38.835071 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="init" Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.835078 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="init" Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.835279 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="67d225ea-2f1a-4270-8668-6a6001bd7c8c" containerName="dnsmasq-dns" Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.836859 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.844224 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w9x9p"] Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.962888 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-utilities\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.963026 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-catalog-content\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:38 crc kubenswrapper[4943]: I1129 07:09:38.963126 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc88n\" (UniqueName: \"kubernetes.io/projected/46b6a55c-a2cb-4930-a34c-4abe5d84525e-kube-api-access-jc88n\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:39 crc kubenswrapper[4943]: I1129 07:09:39.064613 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-catalog-content\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:39 crc kubenswrapper[4943]: I1129 07:09:39.064756 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc88n\" (UniqueName: \"kubernetes.io/projected/46b6a55c-a2cb-4930-a34c-4abe5d84525e-kube-api-access-jc88n\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:39 crc kubenswrapper[4943]: I1129 07:09:39.065144 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-catalog-content\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:39 crc kubenswrapper[4943]: I1129 07:09:39.065263 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-utilities\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:39 crc kubenswrapper[4943]: I1129 07:09:39.065693 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-utilities\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:39 crc kubenswrapper[4943]: I1129 07:09:39.091867 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jc88n\" (UniqueName: \"kubernetes.io/projected/46b6a55c-a2cb-4930-a34c-4abe5d84525e-kube-api-access-jc88n\") pod \"redhat-operators-w9x9p\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:39 crc kubenswrapper[4943]: I1129 07:09:39.155601 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.639312 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mv6c9" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.760130 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-scripts\") pod \"3660253d-7204-4091-8cb2-589517e751e7\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.760277 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-fernet-keys\") pod \"3660253d-7204-4091-8cb2-589517e751e7\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.760362 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-credential-keys\") pod \"3660253d-7204-4091-8cb2-589517e751e7\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.760392 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-config-data\") pod \"3660253d-7204-4091-8cb2-589517e751e7\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.760479 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-combined-ca-bundle\") pod \"3660253d-7204-4091-8cb2-589517e751e7\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.760531 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xtbx\" (UniqueName: \"kubernetes.io/projected/3660253d-7204-4091-8cb2-589517e751e7-kube-api-access-6xtbx\") pod \"3660253d-7204-4091-8cb2-589517e751e7\" (UID: \"3660253d-7204-4091-8cb2-589517e751e7\") " Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.768191 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3660253d-7204-4091-8cb2-589517e751e7" (UID: "3660253d-7204-4091-8cb2-589517e751e7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.768232 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3660253d-7204-4091-8cb2-589517e751e7" (UID: "3660253d-7204-4091-8cb2-589517e751e7"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.785674 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-scripts" (OuterVolumeSpecName: "scripts") pod "3660253d-7204-4091-8cb2-589517e751e7" (UID: "3660253d-7204-4091-8cb2-589517e751e7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.785985 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3660253d-7204-4091-8cb2-589517e751e7-kube-api-access-6xtbx" (OuterVolumeSpecName: "kube-api-access-6xtbx") pod "3660253d-7204-4091-8cb2-589517e751e7" (UID: "3660253d-7204-4091-8cb2-589517e751e7"). InnerVolumeSpecName "kube-api-access-6xtbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.790739 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-config-data" (OuterVolumeSpecName: "config-data") pod "3660253d-7204-4091-8cb2-589517e751e7" (UID: "3660253d-7204-4091-8cb2-589517e751e7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.794442 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3660253d-7204-4091-8cb2-589517e751e7" (UID: "3660253d-7204-4091-8cb2-589517e751e7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.862840 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.862875 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.862885 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xtbx\" (UniqueName: \"kubernetes.io/projected/3660253d-7204-4091-8cb2-589517e751e7-kube-api-access-6xtbx\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.862894 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.862904 4943 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:44 crc kubenswrapper[4943]: I1129 07:09:44.862912 4943 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3660253d-7204-4091-8cb2-589517e751e7-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.440945 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-mv6c9" event={"ID":"3660253d-7204-4091-8cb2-589517e751e7","Type":"ContainerDied","Data":"84d63b93ea3ba8649e043b44b1568f7fdc8767f8de45d27d007c5aff870e9da1"} Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.441227 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84d63b93ea3ba8649e043b44b1568f7fdc8767f8de45d27d007c5aff870e9da1" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.441042 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mv6c9" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.750858 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6f668984b9-kxcdm"] Nov 29 07:09:45 crc kubenswrapper[4943]: E1129 07:09:45.751298 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3660253d-7204-4091-8cb2-589517e751e7" containerName="keystone-bootstrap" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.751314 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3660253d-7204-4091-8cb2-589517e751e7" containerName="keystone-bootstrap" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.751545 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3660253d-7204-4091-8cb2-589517e751e7" containerName="keystone-bootstrap" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.752251 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.761377 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.761676 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.761789 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.761891 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-52hmr" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.762061 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.766511 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6f668984b9-kxcdm"] Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.766906 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.881747 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-credential-keys\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.882335 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7csh2\" (UniqueName: \"kubernetes.io/projected/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-kube-api-access-7csh2\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc 
kubenswrapper[4943]: I1129 07:09:45.882477 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-public-tls-certs\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.882587 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-scripts\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.882808 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-internal-tls-certs\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.882948 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-combined-ca-bundle\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.883131 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-config-data\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.883582 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-fernet-keys\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.985123 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7csh2\" (UniqueName: \"kubernetes.io/projected/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-kube-api-access-7csh2\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.985494 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-public-tls-certs\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.985523 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-scripts\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc 
kubenswrapper[4943]: I1129 07:09:45.985557 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-internal-tls-certs\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.985598 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-combined-ca-bundle\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.985642 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-config-data\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.985669 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-fernet-keys\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.985710 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-credential-keys\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.992976 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-public-tls-certs\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.993006 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-combined-ca-bundle\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.994072 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-scripts\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.998373 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-internal-tls-certs\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:45 crc kubenswrapper[4943]: I1129 07:09:45.998618 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-credential-keys\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:46 crc kubenswrapper[4943]: I1129 07:09:46.004713 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7csh2\" (UniqueName: \"kubernetes.io/projected/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-kube-api-access-7csh2\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:46 crc kubenswrapper[4943]: I1129 07:09:46.008142 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-config-data\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:46 crc kubenswrapper[4943]: I1129 07:09:46.014071 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d5336ed-f3f6-4d44-9f80-d6b00646c97d-fernet-keys\") pod \"keystone-6f668984b9-kxcdm\" (UID: \"6d5336ed-f3f6-4d44-9f80-d6b00646c97d\") " pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:46 crc kubenswrapper[4943]: I1129 07:09:46.075054 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:09:48 crc kubenswrapper[4943]: E1129 07:09:48.516927 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 29 07:09:48 crc kubenswrapper[4943]: E1129 07:09:48.517317 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t6d7x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-t85kr_openstack(6e6b4461-55fa-4092-b3c4-bc414ea16f70): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:09:48 crc kubenswrapper[4943]: E1129 07:09:48.519327 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-t85kr" podUID="6e6b4461-55fa-4092-b3c4-bc414ea16f70" Nov 29 07:09:48 crc kubenswrapper[4943]: I1129 07:09:48.532427 4943 scope.go:117] "RemoveContainer" containerID="2cffdf91f2e436cecc6bc18b94af5b4e5dcab345f341000e6050435f1c28e2bf" Nov 29 07:09:48 crc kubenswrapper[4943]: E1129 07:09:48.575918 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 29 07:09:48 crc kubenswrapper[4943]: E1129 07:09:48.576412 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6ch68,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-m6jf9_openstack(a3301c2a-4575-4e54-a396-d31fb9c5e427): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:09:48 crc kubenswrapper[4943]: E1129 07:09:48.577749 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-m6jf9" podUID="a3301c2a-4575-4e54-a396-d31fb9c5e427" Nov 29 07:09:49 crc kubenswrapper[4943]: E1129 07:09:49.482951 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-m6jf9" podUID="a3301c2a-4575-4e54-a396-d31fb9c5e427" Nov 29 07:10:02 crc kubenswrapper[4943]: E1129 07:10:02.328953 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-t85kr" podUID="6e6b4461-55fa-4092-b3c4-bc414ea16f70" Nov 29 07:10:02 crc 
kubenswrapper[4943]: I1129 07:10:02.613321 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:10:02 crc kubenswrapper[4943]: I1129 07:10:02.613410 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:10:06 crc kubenswrapper[4943]: E1129 07:10:06.430723 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 29 07:10:06 crc kubenswrapper[4943]: E1129 07:10:06.431698 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4ffx2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-dnmc7_openstack(00a997b6-77fe-4644-8034-6a35b7518421): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:10:06 crc kubenswrapper[4943]: E1129 07:10:06.432785 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-dnmc7" podUID="00a997b6-77fe-4644-8034-6a35b7518421" Nov 29 07:10:06 crc kubenswrapper[4943]: I1129 07:10:06.884848 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w9x9p"] Nov 29 07:10:19 crc kubenswrapper[4943]: E1129 07:10:19.328784 4943 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-dnmc7" podUID="00a997b6-77fe-4644-8034-6a35b7518421" Nov 29 07:10:27 crc kubenswrapper[4943]: W1129 07:10:27.612891 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46b6a55c_a2cb_4930_a34c_4abe5d84525e.slice/crio-354fec63f86c68caf8d5952c2f8d5c673b64b8a0e3241d052bdbd847ea0e7466 WatchSource:0}: Error finding container 354fec63f86c68caf8d5952c2f8d5c673b64b8a0e3241d052bdbd847ea0e7466: Status 404 returned error can't find the container with id 354fec63f86c68caf8d5952c2f8d5c673b64b8a0e3241d052bdbd847ea0e7466 Nov 29 07:10:27 crc kubenswrapper[4943]: E1129 07:10:27.622434 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 29 07:10:27 crc kubenswrapper[4943]: E1129 07:10:27.622579 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6ch68,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDev
ice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-m6jf9_openstack(a3301c2a-4575-4e54-a396-d31fb9c5e427): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:10:27 crc kubenswrapper[4943]: E1129 07:10:27.623992 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-m6jf9" podUID="a3301c2a-4575-4e54-a396-d31fb9c5e427" Nov 29 07:10:27 crc kubenswrapper[4943]: I1129 07:10:27.860140 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w9x9p" event={"ID":"46b6a55c-a2cb-4930-a34c-4abe5d84525e","Type":"ContainerStarted","Data":"354fec63f86c68caf8d5952c2f8d5c673b64b8a0e3241d052bdbd847ea0e7466"} Nov 29 07:10:28 crc kubenswrapper[4943]: I1129 07:10:28.031511 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6f668984b9-kxcdm"] Nov 29 07:10:28 crc kubenswrapper[4943]: I1129 07:10:28.869489 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6f668984b9-kxcdm" event={"ID":"6d5336ed-f3f6-4d44-9f80-d6b00646c97d","Type":"ContainerStarted","Data":"b6545620525709705c10ceaa28e97c31a6219d6383ae661ae38f27d0fe30727b"} Nov 29 07:10:31 crc kubenswrapper[4943]: I1129 07:10:31.891629 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w9x9p" event={"ID":"46b6a55c-a2cb-4930-a34c-4abe5d84525e","Type":"ContainerStarted","Data":"43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203"} Nov 29 07:10:32 crc kubenswrapper[4943]: I1129 07:10:32.613530 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:10:32 crc kubenswrapper[4943]: I1129 07:10:32.613616 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:10:32 crc kubenswrapper[4943]: I1129 07:10:32.613655 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 07:10:32 crc kubenswrapper[4943]: I1129 07:10:32.614310 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"74f5fd4bd09ec071509bc04dfa178bb8764e6d0b7f45e141ac761bcd13f81c65"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 07:10:32 crc kubenswrapper[4943]: I1129 07:10:32.614380 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://74f5fd4bd09ec071509bc04dfa178bb8764e6d0b7f45e141ac761bcd13f81c65" gracePeriod=600 Nov 29 07:10:32 crc 
kubenswrapper[4943]: I1129 07:10:32.902049 4943 generic.go:334] "Generic (PLEG): container finished" podID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerID="43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203" exitCode=0 Nov 29 07:10:32 crc kubenswrapper[4943]: I1129 07:10:32.902156 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w9x9p" event={"ID":"46b6a55c-a2cb-4930-a34c-4abe5d84525e","Type":"ContainerDied","Data":"43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203"} Nov 29 07:10:32 crc kubenswrapper[4943]: I1129 07:10:32.906019 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6f668984b9-kxcdm" event={"ID":"6d5336ed-f3f6-4d44-9f80-d6b00646c97d","Type":"ContainerStarted","Data":"84b850bbaafbc7e980103fa60629185393b71bdf68ad958a8914739a9fe5b127"} Nov 29 07:10:32 crc kubenswrapper[4943]: I1129 07:10:32.907982 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-99dmd" event={"ID":"923cc7a6-0fd5-44c0-a568-88eeccc8f31e","Type":"ContainerStarted","Data":"6f6c601d2ad15d893b0eb2bcf658e32f1935e1ae81528cf262bc71375b1b5908"} Nov 29 07:10:34 crc kubenswrapper[4943]: I1129 07:10:34.951387 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-99dmd" podStartSLOduration=52.63376361 podStartE2EDuration="2m4.951366468s" podCreationTimestamp="2025-11-29 07:08:30 +0000 UTC" firstStartedPulling="2025-11-29 07:08:54.08360692 +0000 UTC m=+2109.013695673" lastFinishedPulling="2025-11-29 07:10:06.401209778 +0000 UTC m=+2181.331298531" observedRunningTime="2025-11-29 07:10:34.944736274 +0000 UTC m=+2209.874825027" watchObservedRunningTime="2025-11-29 07:10:34.951366468 +0000 UTC m=+2209.881455221" Nov 29 07:10:35 crc kubenswrapper[4943]: E1129 07:10:35.883255 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/sg-core:latest" Nov 29 07:10:35 crc kubenswrapper[4943]: E1129 07:10:35.883490 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:sg-core,Image:quay.io/openstack-k8s-operators/sg-core:latest,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:sg-core-conf-yaml,ReadOnly:false,MountPath:/etc/sg-core.conf.yaml,SubPath:sg-core.conf.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-66fh2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(b3e0df45-d658-47c8-8322-48bb269849dd): ErrImagePull: rpc error: code = Canceled desc = copying 
config: context canceled" logger="UnhandledError" Nov 29 07:10:35 crc kubenswrapper[4943]: I1129 07:10:35.932108 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="74f5fd4bd09ec071509bc04dfa178bb8764e6d0b7f45e141ac761bcd13f81c65" exitCode=0 Nov 29 07:10:35 crc kubenswrapper[4943]: I1129 07:10:35.932201 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"74f5fd4bd09ec071509bc04dfa178bb8764e6d0b7f45e141ac761bcd13f81c65"} Nov 29 07:10:35 crc kubenswrapper[4943]: I1129 07:10:35.932264 4943 scope.go:117] "RemoveContainer" containerID="5185495d7cae328bdeaaaaaa287ce6358b6b70e9e015d2b1835ba8ba9ebddc15" Nov 29 07:10:35 crc kubenswrapper[4943]: I1129 07:10:35.932322 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:10:35 crc kubenswrapper[4943]: I1129 07:10:35.955670 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6f668984b9-kxcdm" podStartSLOduration=50.955649242 podStartE2EDuration="50.955649242s" podCreationTimestamp="2025-11-29 07:09:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:10:35.951874608 +0000 UTC m=+2210.881963381" watchObservedRunningTime="2025-11-29 07:10:35.955649242 +0000 UTC m=+2210.885738015" Nov 29 07:10:36 crc kubenswrapper[4943]: I1129 07:10:36.943544 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"} Nov 29 07:10:38 crc kubenswrapper[4943]: I1129 07:10:38.511302 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6f668984b9-kxcdm" Nov 29 07:10:41 crc kubenswrapper[4943]: E1129 07:10:41.518785 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-m6jf9" podUID="a3301c2a-4575-4e54-a396-d31fb9c5e427" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.270382 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.271800 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.275673 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.275688 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.276705 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-w62cr" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.283284 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.371889 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21d04d3f-3885-4bd5-a28a-7539ab86bf24-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.371959 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2vb5\" (UniqueName: \"kubernetes.io/projected/21d04d3f-3885-4bd5-a28a-7539ab86bf24-kube-api-access-k2vb5\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.372179 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21d04d3f-3885-4bd5-a28a-7539ab86bf24-openstack-config-secret\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.372250 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21d04d3f-3885-4bd5-a28a-7539ab86bf24-openstack-config\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.474840 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21d04d3f-3885-4bd5-a28a-7539ab86bf24-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.474929 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2vb5\" (UniqueName: \"kubernetes.io/projected/21d04d3f-3885-4bd5-a28a-7539ab86bf24-kube-api-access-k2vb5\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.474980 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21d04d3f-3885-4bd5-a28a-7539ab86bf24-openstack-config-secret\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.475015 4943 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21d04d3f-3885-4bd5-a28a-7539ab86bf24-openstack-config\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.476248 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21d04d3f-3885-4bd5-a28a-7539ab86bf24-openstack-config\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.484801 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21d04d3f-3885-4bd5-a28a-7539ab86bf24-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.486488 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21d04d3f-3885-4bd5-a28a-7539ab86bf24-openstack-config-secret\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.502244 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2vb5\" (UniqueName: \"kubernetes.io/projected/21d04d3f-3885-4bd5-a28a-7539ab86bf24-kube-api-access-k2vb5\") pod \"openstackclient\" (UID: \"21d04d3f-3885-4bd5-a28a-7539ab86bf24\") " pod="openstack/openstackclient" Nov 29 07:10:42 crc kubenswrapper[4943]: I1129 07:10:42.602415 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 29 07:10:51 crc kubenswrapper[4943]: I1129 07:10:51.812398 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 29 07:10:52 crc kubenswrapper[4943]: I1129 07:10:52.097404 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w9x9p" event={"ID":"46b6a55c-a2cb-4930-a34c-4abe5d84525e","Type":"ContainerStarted","Data":"5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351"} Nov 29 07:10:52 crc kubenswrapper[4943]: I1129 07:10:52.100176 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dnmc7" event={"ID":"00a997b6-77fe-4644-8034-6a35b7518421","Type":"ContainerStarted","Data":"a1cdb08a04fe78ea7a9ec26fdf44580ba70fd66c8040c2f7e07a655e73668168"} Nov 29 07:10:54 crc kubenswrapper[4943]: I1129 07:10:54.122471 4943 generic.go:334] "Generic (PLEG): container finished" podID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerID="5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351" exitCode=0 Nov 29 07:10:54 crc kubenswrapper[4943]: I1129 07:10:54.126634 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w9x9p" event={"ID":"46b6a55c-a2cb-4930-a34c-4abe5d84525e","Type":"ContainerDied","Data":"5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351"} Nov 29 07:10:54 crc kubenswrapper[4943]: I1129 07:10:54.126736 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-t85kr" event={"ID":"6e6b4461-55fa-4092-b3c4-bc414ea16f70","Type":"ContainerStarted","Data":"f4c31a8fb5196326cd984ff41e04d48ad5a2aebc72c4a42ca33a086806c151fa"} Nov 29 07:10:54 crc kubenswrapper[4943]: I1129 07:10:54.152467 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-dnmc7" podStartSLOduration=27.910467713 podStartE2EDuration="2m25.152447297s" podCreationTimestamp="2025-11-29 07:08:29 +0000 UTC" firstStartedPulling="2025-11-29 07:08:54.097875453 +0000 UTC m=+2109.027964206" lastFinishedPulling="2025-11-29 07:10:51.339855037 +0000 UTC m=+2226.269943790" observedRunningTime="2025-11-29 07:10:52.128807136 +0000 UTC m=+2227.058895889" watchObservedRunningTime="2025-11-29 07:10:54.152447297 +0000 UTC m=+2229.082536050" Nov 29 07:10:54 crc kubenswrapper[4943]: I1129 07:10:54.166773 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-t85kr" podStartSLOduration=4.356840736 podStartE2EDuration="2m28.166751821s" podCreationTimestamp="2025-11-29 07:08:26 +0000 UTC" firstStartedPulling="2025-11-29 07:08:27.529746787 +0000 UTC m=+2082.459835540" lastFinishedPulling="2025-11-29 07:10:51.339657872 +0000 UTC m=+2226.269746625" observedRunningTime="2025-11-29 07:10:54.164828774 +0000 UTC m=+2229.094917537" watchObservedRunningTime="2025-11-29 07:10:54.166751821 +0000 UTC m=+2229.096840574" Nov 29 07:11:02 crc kubenswrapper[4943]: W1129 07:11:02.111703 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21d04d3f_3885_4bd5_a28a_7539ab86bf24.slice/crio-c9a6ce77a0877e5c26f36e7748fa37e2cad7efdfcefab5ef07dd8b33cd0d890a WatchSource:0}: Error finding container c9a6ce77a0877e5c26f36e7748fa37e2cad7efdfcefab5ef07dd8b33cd0d890a: Status 404 returned error can't find the container with id c9a6ce77a0877e5c26f36e7748fa37e2cad7efdfcefab5ef07dd8b33cd0d890a Nov 29 07:11:02 crc kubenswrapper[4943]: I1129 
07:11:02.198494 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"21d04d3f-3885-4bd5-a28a-7539ab86bf24","Type":"ContainerStarted","Data":"c9a6ce77a0877e5c26f36e7748fa37e2cad7efdfcefab5ef07dd8b33cd0d890a"} Nov 29 07:11:02 crc kubenswrapper[4943]: E1129 07:11:02.974965 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24:latest" Nov 29 07:11:02 crc kubenswrapper[4943]: E1129 07:11:02.975457 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24:latest,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-66fh2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(b3e0df45-d658-47c8-8322-48bb269849dd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 07:11:02 crc kubenswrapper[4943]: E1129 07:11:02.976692 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"ceilometer-notification-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"sg-core\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"proxy-httpd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="b3e0df45-d658-47c8-8322-48bb269849dd" Nov 29 07:11:03 crc kubenswrapper[4943]: I1129 07:11:03.905364 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.071753 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-config-data\") pod \"b3e0df45-d658-47c8-8322-48bb269849dd\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.071968 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-run-httpd\") pod \"b3e0df45-d658-47c8-8322-48bb269849dd\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.072050 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-sg-core-conf-yaml\") pod \"b3e0df45-d658-47c8-8322-48bb269849dd\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.072098 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-scripts\") pod \"b3e0df45-d658-47c8-8322-48bb269849dd\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.072161 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-log-httpd\") pod \"b3e0df45-d658-47c8-8322-48bb269849dd\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.072244 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66fh2\" (UniqueName: \"kubernetes.io/projected/b3e0df45-d658-47c8-8322-48bb269849dd-kube-api-access-66fh2\") pod \"b3e0df45-d658-47c8-8322-48bb269849dd\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.072298 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-combined-ca-bundle\") pod \"b3e0df45-d658-47c8-8322-48bb269849dd\" (UID: \"b3e0df45-d658-47c8-8322-48bb269849dd\") " Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.072701 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-run-httpd" 
(OuterVolumeSpecName: "run-httpd") pod "b3e0df45-d658-47c8-8322-48bb269849dd" (UID: "b3e0df45-d658-47c8-8322-48bb269849dd"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.073210 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b3e0df45-d658-47c8-8322-48bb269849dd" (UID: "b3e0df45-d658-47c8-8322-48bb269849dd"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.073916 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.073950 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3e0df45-d658-47c8-8322-48bb269849dd-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.078535 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-scripts" (OuterVolumeSpecName: "scripts") pod "b3e0df45-d658-47c8-8322-48bb269849dd" (UID: "b3e0df45-d658-47c8-8322-48bb269849dd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.078744 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3e0df45-d658-47c8-8322-48bb269849dd-kube-api-access-66fh2" (OuterVolumeSpecName: "kube-api-access-66fh2") pod "b3e0df45-d658-47c8-8322-48bb269849dd" (UID: "b3e0df45-d658-47c8-8322-48bb269849dd"). InnerVolumeSpecName "kube-api-access-66fh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.080061 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-config-data" (OuterVolumeSpecName: "config-data") pod "b3e0df45-d658-47c8-8322-48bb269849dd" (UID: "b3e0df45-d658-47c8-8322-48bb269849dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.080716 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b3e0df45-d658-47c8-8322-48bb269849dd" (UID: "b3e0df45-d658-47c8-8322-48bb269849dd"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.082396 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3e0df45-d658-47c8-8322-48bb269849dd" (UID: "b3e0df45-d658-47c8-8322-48bb269849dd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.175989 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.176044 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.176060 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66fh2\" (UniqueName: \"kubernetes.io/projected/b3e0df45-d658-47c8-8322-48bb269849dd-kube-api-access-66fh2\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.176078 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.176142 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e0df45-d658-47c8-8322-48bb269849dd-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.231876 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w9x9p" event={"ID":"46b6a55c-a2cb-4930-a34c-4abe5d84525e","Type":"ContainerStarted","Data":"f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae"} Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.235310 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3e0df45-d658-47c8-8322-48bb269849dd","Type":"ContainerDied","Data":"54fdd73b0f34ed5cab2ae0bf36373515053ea8a6c0e8325cc44f6926420be5f3"} Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.235484 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.260260 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w9x9p" podStartSLOduration=57.98543702 podStartE2EDuration="1m26.260237705s" podCreationTimestamp="2025-11-29 07:09:38 +0000 UTC" firstStartedPulling="2025-11-29 07:10:34.924593044 +0000 UTC m=+2209.854681797" lastFinishedPulling="2025-11-29 07:11:03.199393729 +0000 UTC m=+2238.129482482" observedRunningTime="2025-11-29 07:11:04.250649147 +0000 UTC m=+2239.180737910" watchObservedRunningTime="2025-11-29 07:11:04.260237705 +0000 UTC m=+2239.190326458" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.312345 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.332690 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.348649 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.351423 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.355097 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.356014 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.362594 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.378620 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-run-httpd\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.378686 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bk992\" (UniqueName: \"kubernetes.io/projected/37971419-7e31-42b7-a067-28a17f988a77-kube-api-access-bk992\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.378718 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.378788 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-config-data\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.378815 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-log-httpd\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.378885 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-scripts\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.378933 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.480184 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-scripts\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.480277 4943 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.480365 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-run-httpd\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.480410 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk992\" (UniqueName: \"kubernetes.io/projected/37971419-7e31-42b7-a067-28a17f988a77-kube-api-access-bk992\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.480437 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.480486 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-config-data\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.480513 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-log-httpd\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.481124 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-run-httpd\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.482993 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-log-httpd\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.486398 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-scripts\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.486884 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.497958 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-config-data\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.498317 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.505377 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk992\" (UniqueName: \"kubernetes.io/projected/37971419-7e31-42b7-a067-28a17f988a77-kube-api-access-bk992\") pod \"ceilometer-0\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " pod="openstack/ceilometer-0" Nov 29 07:11:04 crc kubenswrapper[4943]: I1129 07:11:04.673861 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:11:05 crc kubenswrapper[4943]: I1129 07:11:05.061407 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:11:05 crc kubenswrapper[4943]: I1129 07:11:05.245700 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerStarted","Data":"19cf3dac373ac6f844bd35ff183914958ca1219b4f0ba6e7d556d9023f5dea42"} Nov 29 07:11:05 crc kubenswrapper[4943]: I1129 07:11:05.247590 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-m6jf9" event={"ID":"a3301c2a-4575-4e54-a396-d31fb9c5e427","Type":"ContainerStarted","Data":"eccb4dbe92cf56c6596c85432ca0f1425762c93367c44aaa8cd07ab960accd45"} Nov 29 07:11:05 crc kubenswrapper[4943]: I1129 07:11:05.350738 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3e0df45-d658-47c8-8322-48bb269849dd" path="/var/lib/kubelet/pods/b3e0df45-d658-47c8-8322-48bb269849dd/volumes" Nov 29 07:11:06 crc kubenswrapper[4943]: I1129 07:11:06.357619 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-m6jf9" podStartSLOduration=26.702327086 podStartE2EDuration="2m36.357557122s" podCreationTimestamp="2025-11-29 07:08:30 +0000 UTC" firstStartedPulling="2025-11-29 07:08:54.081073297 +0000 UTC m=+2109.011162050" lastFinishedPulling="2025-11-29 07:11:03.736303333 +0000 UTC m=+2238.666392086" observedRunningTime="2025-11-29 07:11:06.348688112 +0000 UTC m=+2241.278776865" watchObservedRunningTime="2025-11-29 07:11:06.357557122 +0000 UTC m=+2241.287645875" Nov 29 07:11:07 crc kubenswrapper[4943]: I1129 07:11:07.312996 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerStarted","Data":"9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0"} Nov 29 07:11:08 crc kubenswrapper[4943]: I1129 07:11:08.327954 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerStarted","Data":"de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be"} Nov 29 07:11:09 crc kubenswrapper[4943]: I1129 07:11:09.156164 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:11:09 crc 
kubenswrapper[4943]: I1129 07:11:09.156615 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:11:10 crc kubenswrapper[4943]: I1129 07:11:10.220307 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w9x9p" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="registry-server" probeResult="failure" output=< Nov 29 07:11:10 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 07:11:10 crc kubenswrapper[4943]: > Nov 29 07:11:19 crc kubenswrapper[4943]: I1129 07:11:19.199185 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:11:19 crc kubenswrapper[4943]: I1129 07:11:19.247166 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:11:19 crc kubenswrapper[4943]: I1129 07:11:19.447604 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w9x9p"] Nov 29 07:11:20 crc kubenswrapper[4943]: E1129 07:11:20.298078 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Nov 29 07:11:20 crc kubenswrapper[4943]: E1129 07:11:20.298369 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nf8h94h5bfh685h75h5b6h5h645h66h588h67dh8fh5ch698h679h5fchcfh66h5c8h558h594hc6h5c8h88h585hbbh68fh5b9h64fh585h8fhc6q,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k2vb5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstackclient_openstack(21d04d3f-3885-4bd5-a28a-7539ab86bf24): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:11:20 crc kubenswrapper[4943]: E1129 07:11:20.299644 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="21d04d3f-3885-4bd5-a28a-7539ab86bf24" Nov 29 07:11:20 crc kubenswrapper[4943]: I1129 07:11:20.459496 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w9x9p" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="registry-server" containerID="cri-o://f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae" gracePeriod=2 Nov 29 07:11:20 crc kubenswrapper[4943]: E1129 07:11:20.461911 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="21d04d3f-3885-4bd5-a28a-7539ab86bf24" Nov 29 07:11:20 crc kubenswrapper[4943]: I1129 07:11:20.877644 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:11:20 crc kubenswrapper[4943]: I1129 07:11:20.959320 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jc88n\" (UniqueName: \"kubernetes.io/projected/46b6a55c-a2cb-4930-a34c-4abe5d84525e-kube-api-access-jc88n\") pod \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " Nov 29 07:11:20 crc kubenswrapper[4943]: I1129 07:11:20.959626 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-catalog-content\") pod \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " Nov 29 07:11:20 crc kubenswrapper[4943]: I1129 07:11:20.959752 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-utilities\") pod \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\" (UID: \"46b6a55c-a2cb-4930-a34c-4abe5d84525e\") " Nov 29 07:11:20 crc kubenswrapper[4943]: I1129 07:11:20.960491 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-utilities" (OuterVolumeSpecName: "utilities") pod "46b6a55c-a2cb-4930-a34c-4abe5d84525e" (UID: "46b6a55c-a2cb-4930-a34c-4abe5d84525e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:11:20 crc kubenswrapper[4943]: I1129 07:11:20.965607 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b6a55c-a2cb-4930-a34c-4abe5d84525e-kube-api-access-jc88n" (OuterVolumeSpecName: "kube-api-access-jc88n") pod "46b6a55c-a2cb-4930-a34c-4abe5d84525e" (UID: "46b6a55c-a2cb-4930-a34c-4abe5d84525e"). InnerVolumeSpecName "kube-api-access-jc88n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.061503 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.061578 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jc88n\" (UniqueName: \"kubernetes.io/projected/46b6a55c-a2cb-4930-a34c-4abe5d84525e-kube-api-access-jc88n\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.069877 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "46b6a55c-a2cb-4930-a34c-4abe5d84525e" (UID: "46b6a55c-a2cb-4930-a34c-4abe5d84525e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.163175 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46b6a55c-a2cb-4930-a34c-4abe5d84525e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.468888 4943 generic.go:334] "Generic (PLEG): container finished" podID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerID="f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae" exitCode=0 Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.468941 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w9x9p" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.468987 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w9x9p" event={"ID":"46b6a55c-a2cb-4930-a34c-4abe5d84525e","Type":"ContainerDied","Data":"f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae"} Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.469026 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w9x9p" event={"ID":"46b6a55c-a2cb-4930-a34c-4abe5d84525e","Type":"ContainerDied","Data":"354fec63f86c68caf8d5952c2f8d5c673b64b8a0e3241d052bdbd847ea0e7466"} Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.469046 4943 scope.go:117] "RemoveContainer" containerID="f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.472366 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerStarted","Data":"d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f"} Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.494293 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w9x9p"] Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.495162 4943 scope.go:117] "RemoveContainer" containerID="5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.503186 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w9x9p"] Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.518753 4943 scope.go:117] "RemoveContainer" containerID="43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.558013 4943 scope.go:117] "RemoveContainer" containerID="f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae" Nov 29 07:11:21 crc kubenswrapper[4943]: E1129 07:11:21.558695 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae\": container with ID starting with f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae not found: ID does not exist" containerID="f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.558756 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae"} err="failed to get container status \"f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae\": rpc error: code = NotFound desc = could not find container \"f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae\": container with ID starting with f34ae12f3f8d0d1bc226c58e3ece07686029feb313df09be3c2846019ce38dae not found: ID does not exist" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.558793 4943 scope.go:117] "RemoveContainer" containerID="5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351" Nov 29 07:11:21 crc kubenswrapper[4943]: E1129 07:11:21.562819 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351\": container with ID starting with 5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351 not found: ID does not exist" containerID="5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.562877 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351"} err="failed to get container status \"5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351\": rpc error: code = NotFound desc = could not find container \"5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351\": container with ID starting with 5e08a4ac58c4046c4aa1c9ccadc6534493adbd5aaa5d5e2232ea9bd8ff78d351 not found: ID does not exist" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.562914 4943 scope.go:117] "RemoveContainer" containerID="43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203" Nov 29 07:11:21 crc kubenswrapper[4943]: E1129 07:11:21.563457 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203\": container with ID starting with 43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203 not found: ID does not exist" containerID="43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203" Nov 29 07:11:21 crc kubenswrapper[4943]: I1129 07:11:21.563507 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203"} err="failed to get container status \"43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203\": rpc error: code = NotFound desc = could not find container \"43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203\": container with ID starting with 43bcd74c7f896f550ec45de3bfe0ba121df7abcd34927a08eb4e647b0e894203 not found: ID does not exist" Nov 29 07:11:23 crc kubenswrapper[4943]: I1129 07:11:23.339492 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" path="/var/lib/kubelet/pods/46b6a55c-a2cb-4930-a34c-4abe5d84525e/volumes" Nov 29 07:11:32 crc kubenswrapper[4943]: I1129 07:11:32.578936 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerStarted","Data":"1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864"} Nov 29 07:11:33 crc kubenswrapper[4943]: I1129 07:11:33.587756 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:11:33 crc kubenswrapper[4943]: I1129 07:11:33.613930 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.473796012 podStartE2EDuration="29.613910061s" podCreationTimestamp="2025-11-29 07:11:04 +0000 UTC" firstStartedPulling="2025-11-29 07:11:05.072141782 +0000 UTC m=+2240.002230535" lastFinishedPulling="2025-11-29 07:11:32.212255831 +0000 UTC m=+2267.142344584" observedRunningTime="2025-11-29 07:11:33.613009218 +0000 UTC m=+2268.543097991" watchObservedRunningTime="2025-11-29 07:11:33.613910061 +0000 UTC m=+2268.543998814" Nov 29 07:11:40 crc kubenswrapper[4943]: I1129 07:11:40.648820 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/openstackclient" event={"ID":"21d04d3f-3885-4bd5-a28a-7539ab86bf24","Type":"ContainerStarted","Data":"857fef3c86b7d06618078aa3ebcf02b85ee96bd9fa2c3f587ee4b7b8742c0fd1"} Nov 29 07:11:40 crc kubenswrapper[4943]: I1129 07:11:40.664117 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=21.148625541 podStartE2EDuration="58.664099998s" podCreationTimestamp="2025-11-29 07:10:42 +0000 UTC" firstStartedPulling="2025-11-29 07:11:02.114147829 +0000 UTC m=+2237.044236582" lastFinishedPulling="2025-11-29 07:11:39.629622286 +0000 UTC m=+2274.559711039" observedRunningTime="2025-11-29 07:11:40.663256468 +0000 UTC m=+2275.593345241" watchObservedRunningTime="2025-11-29 07:11:40.664099998 +0000 UTC m=+2275.594188771" Nov 29 07:11:53 crc kubenswrapper[4943]: I1129 07:11:53.962770 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-jrkxk"] Nov 29 07:11:53 crc kubenswrapper[4943]: E1129 07:11:53.963602 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="registry-server" Nov 29 07:11:53 crc kubenswrapper[4943]: I1129 07:11:53.963615 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="registry-server" Nov 29 07:11:53 crc kubenswrapper[4943]: E1129 07:11:53.963626 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="extract-content" Nov 29 07:11:53 crc kubenswrapper[4943]: I1129 07:11:53.963632 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="extract-content" Nov 29 07:11:53 crc kubenswrapper[4943]: E1129 07:11:53.963645 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="extract-utilities" Nov 29 07:11:53 crc kubenswrapper[4943]: I1129 07:11:53.963652 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="extract-utilities" Nov 29 07:11:53 crc kubenswrapper[4943]: I1129 07:11:53.963805 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="46b6a55c-a2cb-4930-a34c-4abe5d84525e" containerName="registry-server" Nov 29 07:11:53 crc kubenswrapper[4943]: I1129 07:11:53.964335 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:11:53 crc kubenswrapper[4943]: I1129 07:11:53.974841 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-jrkxk"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.008024 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2136019f-116e-4d59-8187-ddffe47807b5-operator-scripts\") pod \"nova-api-db-create-jrkxk\" (UID: \"2136019f-116e-4d59-8187-ddffe47807b5\") " pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.008218 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fl2vw\" (UniqueName: \"kubernetes.io/projected/2136019f-116e-4d59-8187-ddffe47807b5-kube-api-access-fl2vw\") pod \"nova-api-db-create-jrkxk\" (UID: \"2136019f-116e-4d59-8187-ddffe47807b5\") " pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.057303 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-mkjph"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.058517 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.065701 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-195a-account-create-update-cthml"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.066764 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.070965 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.075434 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-mkjph"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.092737 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-195a-account-create-update-cthml"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.109201 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2136019f-116e-4d59-8187-ddffe47807b5-operator-scripts\") pod \"nova-api-db-create-jrkxk\" (UID: \"2136019f-116e-4d59-8187-ddffe47807b5\") " pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.109255 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3efb75d6-355a-4ec4-9032-1d23890dcf5e-operator-scripts\") pod \"nova-api-195a-account-create-update-cthml\" (UID: \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\") " pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.109286 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fl2vw\" (UniqueName: \"kubernetes.io/projected/2136019f-116e-4d59-8187-ddffe47807b5-kube-api-access-fl2vw\") pod \"nova-api-db-create-jrkxk\" (UID: \"2136019f-116e-4d59-8187-ddffe47807b5\") " pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.109332 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b338485-63e2-4d8b-bb25-3b37aa40d138-operator-scripts\") pod \"nova-cell0-db-create-mkjph\" (UID: \"9b338485-63e2-4d8b-bb25-3b37aa40d138\") " pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.109407 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2g46\" (UniqueName: \"kubernetes.io/projected/9b338485-63e2-4d8b-bb25-3b37aa40d138-kube-api-access-b2g46\") pod \"nova-cell0-db-create-mkjph\" (UID: \"9b338485-63e2-4d8b-bb25-3b37aa40d138\") " pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.109431 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkzsm\" (UniqueName: \"kubernetes.io/projected/3efb75d6-355a-4ec4-9032-1d23890dcf5e-kube-api-access-bkzsm\") pod \"nova-api-195a-account-create-update-cthml\" (UID: \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\") " pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.109943 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2136019f-116e-4d59-8187-ddffe47807b5-operator-scripts\") pod \"nova-api-db-create-jrkxk\" (UID: \"2136019f-116e-4d59-8187-ddffe47807b5\") " pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.131230 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fl2vw\" (UniqueName: \"kubernetes.io/projected/2136019f-116e-4d59-8187-ddffe47807b5-kube-api-access-fl2vw\") pod \"nova-api-db-create-jrkxk\" (UID: \"2136019f-116e-4d59-8187-ddffe47807b5\") " pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.211213 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b338485-63e2-4d8b-bb25-3b37aa40d138-operator-scripts\") pod \"nova-cell0-db-create-mkjph\" (UID: \"9b338485-63e2-4d8b-bb25-3b37aa40d138\") " pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.211611 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2g46\" (UniqueName: \"kubernetes.io/projected/9b338485-63e2-4d8b-bb25-3b37aa40d138-kube-api-access-b2g46\") pod \"nova-cell0-db-create-mkjph\" (UID: \"9b338485-63e2-4d8b-bb25-3b37aa40d138\") " pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.211728 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkzsm\" (UniqueName: \"kubernetes.io/projected/3efb75d6-355a-4ec4-9032-1d23890dcf5e-kube-api-access-bkzsm\") pod \"nova-api-195a-account-create-update-cthml\" (UID: \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\") " pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.211867 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3efb75d6-355a-4ec4-9032-1d23890dcf5e-operator-scripts\") pod \"nova-api-195a-account-create-update-cthml\" (UID: \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\") " 
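The mount-side entries above trace the kubelet volume manager's reconcile loop: each volume advances from VerifyControllerAttachedVolume through "MountVolume started" to "MountVolume.SetUp succeeded", one step per pass over desired state. A toy model of that state machine, using hypothetical types rather than the real reconciler_common.go/operation_generator.go APIs:

// Illustrative only: a minimal model of the mount-side transitions logged
// above. Types and function names are simplifications, not the kubelet API.
package main

import "fmt"

type volumeState int

const (
	attachedVerified volumeState = iota // "VerifyControllerAttachedVolume started"
	mountStarted                        // "MountVolume started"
	setUpSucceeded                      // "MountVolume.SetUp succeeded"
)

type volume struct {
	name  string // e.g. "operator-scripts"
	pod   string // e.g. "openstack/nova-api-db-create-jrkxk"
	state volumeState
}

// reconcile advances each volume one step toward mounted, echoing the
// same milestones the kubelet emits.
func reconcile(vols []volume) {
	for i := range vols {
		switch vols[i].state {
		case attachedVerified:
			vols[i].state = mountStarted
			fmt.Printf("MountVolume started for volume %q pod %q\n", vols[i].name, vols[i].pod)
		case mountStarted:
			vols[i].state = setUpSucceeded
			fmt.Printf("MountVolume.SetUp succeeded for volume %q pod %q\n", vols[i].name, vols[i].pod)
		}
	}
}

func main() {
	vols := []volume{{name: "operator-scripts", pod: "openstack/nova-api-db-create-jrkxk"}}
	reconcile(vols) // first pass: started
	reconcile(vols) // second pass: succeeded
}
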
pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.212223 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b338485-63e2-4d8b-bb25-3b37aa40d138-operator-scripts\") pod \"nova-cell0-db-create-mkjph\" (UID: \"9b338485-63e2-4d8b-bb25-3b37aa40d138\") " pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.212724 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3efb75d6-355a-4ec4-9032-1d23890dcf5e-operator-scripts\") pod \"nova-api-195a-account-create-update-cthml\" (UID: \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\") " pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.230199 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkzsm\" (UniqueName: \"kubernetes.io/projected/3efb75d6-355a-4ec4-9032-1d23890dcf5e-kube-api-access-bkzsm\") pod \"nova-api-195a-account-create-update-cthml\" (UID: \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\") " pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.243485 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2g46\" (UniqueName: \"kubernetes.io/projected/9b338485-63e2-4d8b-bb25-3b37aa40d138-kube-api-access-b2g46\") pod \"nova-cell0-db-create-mkjph\" (UID: \"9b338485-63e2-4d8b-bb25-3b37aa40d138\") " pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.275339 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-54gvp"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.276762 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.281054 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.281049 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-0721-account-create-update-v4z8w"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.282679 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.285686 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.290455 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-54gvp"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.298753 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-0721-account-create-update-v4z8w"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.313270 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a28df764-ae07-442a-8786-6941476ba033-operator-scripts\") pod \"nova-cell0-0721-account-create-update-v4z8w\" (UID: \"a28df764-ae07-442a-8786-6941476ba033\") " pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.313333 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5rgz\" (UniqueName: \"kubernetes.io/projected/a28df764-ae07-442a-8786-6941476ba033-kube-api-access-q5rgz\") pod \"nova-cell0-0721-account-create-update-v4z8w\" (UID: \"a28df764-ae07-442a-8786-6941476ba033\") " pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.313387 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/119259aa-fc0c-4ed0-bcd8-402b1016bea6-operator-scripts\") pod \"nova-cell1-db-create-54gvp\" (UID: \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\") " pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.313631 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlz5c\" (UniqueName: \"kubernetes.io/projected/119259aa-fc0c-4ed0-bcd8-402b1016bea6-kube-api-access-vlz5c\") pod \"nova-cell1-db-create-54gvp\" (UID: \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\") " pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.382991 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.399560 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.422805 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a28df764-ae07-442a-8786-6941476ba033-operator-scripts\") pod \"nova-cell0-0721-account-create-update-v4z8w\" (UID: \"a28df764-ae07-442a-8786-6941476ba033\") " pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.422907 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5rgz\" (UniqueName: \"kubernetes.io/projected/a28df764-ae07-442a-8786-6941476ba033-kube-api-access-q5rgz\") pod \"nova-cell0-0721-account-create-update-v4z8w\" (UID: \"a28df764-ae07-442a-8786-6941476ba033\") " pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.423053 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/119259aa-fc0c-4ed0-bcd8-402b1016bea6-operator-scripts\") pod \"nova-cell1-db-create-54gvp\" (UID: \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\") " pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.423214 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlz5c\" (UniqueName: \"kubernetes.io/projected/119259aa-fc0c-4ed0-bcd8-402b1016bea6-kube-api-access-vlz5c\") pod \"nova-cell1-db-create-54gvp\" (UID: \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\") " pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.424766 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/119259aa-fc0c-4ed0-bcd8-402b1016bea6-operator-scripts\") pod \"nova-cell1-db-create-54gvp\" (UID: \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\") " pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.424804 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a28df764-ae07-442a-8786-6941476ba033-operator-scripts\") pod \"nova-cell0-0721-account-create-update-v4z8w\" (UID: \"a28df764-ae07-442a-8786-6941476ba033\") " pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.443110 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlz5c\" (UniqueName: \"kubernetes.io/projected/119259aa-fc0c-4ed0-bcd8-402b1016bea6-kube-api-access-vlz5c\") pod \"nova-cell1-db-create-54gvp\" (UID: \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\") " pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.453133 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5rgz\" (UniqueName: \"kubernetes.io/projected/a28df764-ae07-442a-8786-6941476ba033-kube-api-access-q5rgz\") pod \"nova-cell0-0721-account-create-update-v4z8w\" (UID: \"a28df764-ae07-442a-8786-6941476ba033\") " pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.474988 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-14c7-account-create-update-qxjwt"] Nov 29 07:11:54 crc 
kubenswrapper[4943]: I1129 07:11:54.475944 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.498100 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-14c7-account-create-update-qxjwt"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.498476 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.526460 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f108cd69-36fb-406c-9038-c690b15eaa26-operator-scripts\") pod \"nova-cell1-14c7-account-create-update-qxjwt\" (UID: \"f108cd69-36fb-406c-9038-c690b15eaa26\") " pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.526792 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb7tj\" (UniqueName: \"kubernetes.io/projected/f108cd69-36fb-406c-9038-c690b15eaa26-kube-api-access-zb7tj\") pod \"nova-cell1-14c7-account-create-update-qxjwt\" (UID: \"f108cd69-36fb-406c-9038-c690b15eaa26\") " pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.628652 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f108cd69-36fb-406c-9038-c690b15eaa26-operator-scripts\") pod \"nova-cell1-14c7-account-create-update-qxjwt\" (UID: \"f108cd69-36fb-406c-9038-c690b15eaa26\") " pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.628794 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb7tj\" (UniqueName: \"kubernetes.io/projected/f108cd69-36fb-406c-9038-c690b15eaa26-kube-api-access-zb7tj\") pod \"nova-cell1-14c7-account-create-update-qxjwt\" (UID: \"f108cd69-36fb-406c-9038-c690b15eaa26\") " pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.629750 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f108cd69-36fb-406c-9038-c690b15eaa26-operator-scripts\") pod \"nova-cell1-14c7-account-create-update-qxjwt\" (UID: \"f108cd69-36fb-406c-9038-c690b15eaa26\") " pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.651747 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb7tj\" (UniqueName: \"kubernetes.io/projected/f108cd69-36fb-406c-9038-c690b15eaa26-kube-api-access-zb7tj\") pod \"nova-cell1-14c7-account-create-update-qxjwt\" (UID: \"f108cd69-36fb-406c-9038-c690b15eaa26\") " pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.706150 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.715446 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.785113 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-jrkxk"] Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.839159 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.951010 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-195a-account-create-update-cthml"] Nov 29 07:11:54 crc kubenswrapper[4943]: W1129 07:11:54.960417 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3efb75d6_355a_4ec4_9032_1d23890dcf5e.slice/crio-72b0fafc7d3f69f3f7f3bedb38742a53b74bad967523d32c25b47e2588a4e614 WatchSource:0}: Error finding container 72b0fafc7d3f69f3f7f3bedb38742a53b74bad967523d32c25b47e2588a4e614: Status 404 returned error can't find the container with id 72b0fafc7d3f69f3f7f3bedb38742a53b74bad967523d32c25b47e2588a4e614 Nov 29 07:11:54 crc kubenswrapper[4943]: I1129 07:11:54.967608 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-mkjph"] Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.095991 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-0721-account-create-update-v4z8w"] Nov 29 07:11:55 crc kubenswrapper[4943]: W1129 07:11:55.097354 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda28df764_ae07_442a_8786_6941476ba033.slice/crio-8bcb93062c46ac568edf805e1e08c949a012e21b62957a6117337ccff1abe59b WatchSource:0}: Error finding container 8bcb93062c46ac568edf805e1e08c949a012e21b62957a6117337ccff1abe59b: Status 404 returned error can't find the container with id 8bcb93062c46ac568edf805e1e08c949a012e21b62957a6117337ccff1abe59b Nov 29 07:11:55 crc kubenswrapper[4943]: W1129 07:11:55.243448 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod119259aa_fc0c_4ed0_bcd8_402b1016bea6.slice/crio-be92a0fda36b245d4ff58d7d65ae77e18a59dda1d72ff5ac466d82ddc1619bc4 WatchSource:0}: Error finding container be92a0fda36b245d4ff58d7d65ae77e18a59dda1d72ff5ac466d82ddc1619bc4: Status 404 returned error can't find the container with id be92a0fda36b245d4ff58d7d65ae77e18a59dda1d72ff5ac466d82ddc1619bc4 Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.255312 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-54gvp"] Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.358547 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-14c7-account-create-update-qxjwt"] Nov 29 07:11:55 crc kubenswrapper[4943]: W1129 07:11:55.381898 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf108cd69_36fb_406c_9038_c690b15eaa26.slice/crio-6d3bca5b2ba8afeb9e1cccc4d469767f93d1137a0960fff68f41720fdfc7a37d WatchSource:0}: Error finding container 6d3bca5b2ba8afeb9e1cccc4d469767f93d1137a0960fff68f41720fdfc7a37d: Status 404 returned error can't find the container with id 6d3bca5b2ba8afeb9e1cccc4d469767f93d1137a0960fff68f41720fdfc7a37d Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.797259 4943 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-api-195a-account-create-update-cthml" event={"ID":"3efb75d6-355a-4ec4-9032-1d23890dcf5e","Type":"ContainerStarted","Data":"72b0fafc7d3f69f3f7f3bedb38742a53b74bad967523d32c25b47e2588a4e614"} Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.798295 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-54gvp" event={"ID":"119259aa-fc0c-4ed0-bcd8-402b1016bea6","Type":"ContainerStarted","Data":"be92a0fda36b245d4ff58d7d65ae77e18a59dda1d72ff5ac466d82ddc1619bc4"} Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.799406 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" event={"ID":"a28df764-ae07-442a-8786-6941476ba033","Type":"ContainerStarted","Data":"8bcb93062c46ac568edf805e1e08c949a012e21b62957a6117337ccff1abe59b"} Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.800608 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mkjph" event={"ID":"9b338485-63e2-4d8b-bb25-3b37aa40d138","Type":"ContainerStarted","Data":"3e871ee82948f38c31afacd4b658a1e3d96a9bd345c5d8e13601e653e84be2c3"} Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.801682 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-jrkxk" event={"ID":"2136019f-116e-4d59-8187-ddffe47807b5","Type":"ContainerStarted","Data":"31b38086762c4b06084aa88d0120f69ecd59b204c265de342273c3358212231d"} Nov 29 07:11:55 crc kubenswrapper[4943]: I1129 07:11:55.802791 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" event={"ID":"f108cd69-36fb-406c-9038-c690b15eaa26","Type":"ContainerStarted","Data":"6d3bca5b2ba8afeb9e1cccc4d469767f93d1137a0960fff68f41720fdfc7a37d"} Nov 29 07:12:04 crc kubenswrapper[4943]: I1129 07:12:04.679342 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 29 07:12:05 crc kubenswrapper[4943]: I1129 07:12:05.889709 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-jrkxk" event={"ID":"2136019f-116e-4d59-8187-ddffe47807b5","Type":"ContainerStarted","Data":"e0024c11bcf356bf3e610c3006da72054bc3ff482fcaeb5e291233a461671ebd"} Nov 29 07:12:05 crc kubenswrapper[4943]: I1129 07:12:05.891541 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" event={"ID":"f108cd69-36fb-406c-9038-c690b15eaa26","Type":"ContainerStarted","Data":"f95ea8857947a88346a5be477d9d7b70a6366fe3bc89a75539ce4779500a970f"} Nov 29 07:12:05 crc kubenswrapper[4943]: I1129 07:12:05.893443 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-195a-account-create-update-cthml" event={"ID":"3efb75d6-355a-4ec4-9032-1d23890dcf5e","Type":"ContainerStarted","Data":"f2c96dca5fa6b3d1c87c32374f169e4bf3a0d16922f1714d254a2d13e981f232"} Nov 29 07:12:05 crc kubenswrapper[4943]: I1129 07:12:05.895669 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-54gvp" event={"ID":"119259aa-fc0c-4ed0-bcd8-402b1016bea6","Type":"ContainerStarted","Data":"c9ab090e2256ac95cabac563f93a67ff5fbbc32bb33194b7a5adc6641f6a83bf"} Nov 29 07:12:05 crc kubenswrapper[4943]: I1129 07:12:05.898118 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" 
event={"ID":"a28df764-ae07-442a-8786-6941476ba033","Type":"ContainerStarted","Data":"2dc7af98f42cb30a1c28f09495252b8eaa53c2cfad023af9d90ac47c523d7bdf"} Nov 29 07:12:05 crc kubenswrapper[4943]: I1129 07:12:05.899347 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mkjph" event={"ID":"9b338485-63e2-4d8b-bb25-3b37aa40d138","Type":"ContainerStarted","Data":"9649aee915343f4875d096547dbec206b902f42010dd20035ba3fadbdc2c0268"} Nov 29 07:12:07 crc kubenswrapper[4943]: I1129 07:12:07.954676 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-jrkxk" podStartSLOduration=14.954659384 podStartE2EDuration="14.954659384s" podCreationTimestamp="2025-11-29 07:11:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:07.949151199 +0000 UTC m=+2302.879239962" watchObservedRunningTime="2025-11-29 07:12:07.954659384 +0000 UTC m=+2302.884748137" Nov 29 07:12:07 crc kubenswrapper[4943]: I1129 07:12:07.972325 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" podStartSLOduration=13.972306452 podStartE2EDuration="13.972306452s" podCreationTimestamp="2025-11-29 07:11:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:07.966672812 +0000 UTC m=+2302.896761575" watchObservedRunningTime="2025-11-29 07:12:07.972306452 +0000 UTC m=+2302.902395205" Nov 29 07:12:07 crc kubenswrapper[4943]: I1129 07:12:07.981037 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" podStartSLOduration=13.981018347 podStartE2EDuration="13.981018347s" podCreationTimestamp="2025-11-29 07:11:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:07.980082385 +0000 UTC m=+2302.910171148" watchObservedRunningTime="2025-11-29 07:12:07.981018347 +0000 UTC m=+2302.911107100" Nov 29 07:12:08 crc kubenswrapper[4943]: I1129 07:12:08.003234 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-195a-account-create-update-cthml" podStartSLOduration=14.003219098 podStartE2EDuration="14.003219098s" podCreationTimestamp="2025-11-29 07:11:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:07.997377203 +0000 UTC m=+2302.927465956" watchObservedRunningTime="2025-11-29 07:12:08.003219098 +0000 UTC m=+2302.933307851" Nov 29 07:12:08 crc kubenswrapper[4943]: I1129 07:12:08.021349 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-mkjph" podStartSLOduration=14.021327836 podStartE2EDuration="14.021327836s" podCreationTimestamp="2025-11-29 07:11:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:08.014394645 +0000 UTC m=+2302.944483398" watchObservedRunningTime="2025-11-29 07:12:08.021327836 +0000 UTC m=+2302.951416589" Nov 29 07:12:08 crc kubenswrapper[4943]: I1129 07:12:08.028097 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-54gvp" 
podStartSLOduration=14.028079274 podStartE2EDuration="14.028079274s" podCreationTimestamp="2025-11-29 07:11:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:08.02672082 +0000 UTC m=+2302.956809563" watchObservedRunningTime="2025-11-29 07:12:08.028079274 +0000 UTC m=+2302.958168027" Nov 29 07:12:15 crc kubenswrapper[4943]: I1129 07:12:15.988346 4943 generic.go:334] "Generic (PLEG): container finished" podID="119259aa-fc0c-4ed0-bcd8-402b1016bea6" containerID="c9ab090e2256ac95cabac563f93a67ff5fbbc32bb33194b7a5adc6641f6a83bf" exitCode=0 Nov 29 07:12:15 crc kubenswrapper[4943]: I1129 07:12:15.988439 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-54gvp" event={"ID":"119259aa-fc0c-4ed0-bcd8-402b1016bea6","Type":"ContainerDied","Data":"c9ab090e2256ac95cabac563f93a67ff5fbbc32bb33194b7a5adc6641f6a83bf"} Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.004819 4943 generic.go:334] "Generic (PLEG): container finished" podID="f108cd69-36fb-406c-9038-c690b15eaa26" containerID="f95ea8857947a88346a5be477d9d7b70a6366fe3bc89a75539ce4779500a970f" exitCode=0 Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.004895 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" event={"ID":"f108cd69-36fb-406c-9038-c690b15eaa26","Type":"ContainerDied","Data":"f95ea8857947a88346a5be477d9d7b70a6366fe3bc89a75539ce4779500a970f"} Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.330865 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.438514 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/119259aa-fc0c-4ed0-bcd8-402b1016bea6-operator-scripts\") pod \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\" (UID: \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\") " Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.438877 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlz5c\" (UniqueName: \"kubernetes.io/projected/119259aa-fc0c-4ed0-bcd8-402b1016bea6-kube-api-access-vlz5c\") pod \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\" (UID: \"119259aa-fc0c-4ed0-bcd8-402b1016bea6\") " Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.439389 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/119259aa-fc0c-4ed0-bcd8-402b1016bea6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "119259aa-fc0c-4ed0-bcd8-402b1016bea6" (UID: "119259aa-fc0c-4ed0-bcd8-402b1016bea6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.439557 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/119259aa-fc0c-4ed0-bcd8-402b1016bea6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.444365 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/119259aa-fc0c-4ed0-bcd8-402b1016bea6-kube-api-access-vlz5c" (OuterVolumeSpecName: "kube-api-access-vlz5c") pod "119259aa-fc0c-4ed0-bcd8-402b1016bea6" (UID: "119259aa-fc0c-4ed0-bcd8-402b1016bea6"). 
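The pod_startup_latency_tracker entries above decompose consistently: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to be that E2E duration minus the image-pull window (lastFinishedPulling minus firstStartedPulling). For ceilometer-0: 29.613910061s − (07:11:32.212255831 − 07:11:05.072141782 = 27.140114049s) = 2.473796012s, matching the logged value. For the nova-* jobs the pull timestamps are the zero time (0001-01-01), i.e. no pull was needed, so SLO and E2E durations coincide. A re-computation of the ceilometer-0 numbers (semantics inferred from the logged values; confirm against kubelet's pod_startup_latency_tracker.go before relying on it):

// Re-derives the ceilometer-0 startup durations from the timestamps above.
package main

import (
	"fmt"
	"time"
)

const layout = "2006-01-02 15:04:05 -0700 MST" // Go parses fractional seconds even without a layout element

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-29 07:11:04 +0000 UTC")
	firstPull := mustParse("2025-11-29 07:11:05.072141782 +0000 UTC")
	lastPull := mustParse("2025-11-29 07:11:32.212255831 +0000 UTC")
	running := mustParse("2025-11-29 07:11:33.613910061 +0000 UTC") // watchObservedRunningTime

	e2e := running.Sub(created)     // podStartE2EDuration: 29.613910061s
	pull := lastPull.Sub(firstPull) // image-pull window:   27.140114049s
	slo := e2e - pull               // podStartSLOduration:  2.473796012s

	fmt.Println(e2e, pull, slo)
}
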
InnerVolumeSpecName "kube-api-access-vlz5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:12:17 crc kubenswrapper[4943]: I1129 07:12:17.541029 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlz5c\" (UniqueName: \"kubernetes.io/projected/119259aa-fc0c-4ed0-bcd8-402b1016bea6-kube-api-access-vlz5c\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.014400 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-54gvp" event={"ID":"119259aa-fc0c-4ed0-bcd8-402b1016bea6","Type":"ContainerDied","Data":"be92a0fda36b245d4ff58d7d65ae77e18a59dda1d72ff5ac466d82ddc1619bc4"} Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.014452 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-54gvp" Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.014457 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be92a0fda36b245d4ff58d7d65ae77e18a59dda1d72ff5ac466d82ddc1619bc4" Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.320714 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.353013 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f108cd69-36fb-406c-9038-c690b15eaa26-operator-scripts\") pod \"f108cd69-36fb-406c-9038-c690b15eaa26\" (UID: \"f108cd69-36fb-406c-9038-c690b15eaa26\") " Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.353110 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zb7tj\" (UniqueName: \"kubernetes.io/projected/f108cd69-36fb-406c-9038-c690b15eaa26-kube-api-access-zb7tj\") pod \"f108cd69-36fb-406c-9038-c690b15eaa26\" (UID: \"f108cd69-36fb-406c-9038-c690b15eaa26\") " Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.355403 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f108cd69-36fb-406c-9038-c690b15eaa26-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f108cd69-36fb-406c-9038-c690b15eaa26" (UID: "f108cd69-36fb-406c-9038-c690b15eaa26"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.362160 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f108cd69-36fb-406c-9038-c690b15eaa26-kube-api-access-zb7tj" (OuterVolumeSpecName: "kube-api-access-zb7tj") pod "f108cd69-36fb-406c-9038-c690b15eaa26" (UID: "f108cd69-36fb-406c-9038-c690b15eaa26"). InnerVolumeSpecName "kube-api-access-zb7tj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.454903 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f108cd69-36fb-406c-9038-c690b15eaa26-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:18 crc kubenswrapper[4943]: I1129 07:12:18.454945 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zb7tj\" (UniqueName: \"kubernetes.io/projected/f108cd69-36fb-406c-9038-c690b15eaa26-kube-api-access-zb7tj\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:19 crc kubenswrapper[4943]: I1129 07:12:19.024062 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" event={"ID":"f108cd69-36fb-406c-9038-c690b15eaa26","Type":"ContainerDied","Data":"6d3bca5b2ba8afeb9e1cccc4d469767f93d1137a0960fff68f41720fdfc7a37d"} Nov 29 07:12:19 crc kubenswrapper[4943]: I1129 07:12:19.024108 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d3bca5b2ba8afeb9e1cccc4d469767f93d1137a0960fff68f41720fdfc7a37d" Nov 29 07:12:19 crc kubenswrapper[4943]: I1129 07:12:19.024132 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-14c7-account-create-update-qxjwt" Nov 29 07:12:20 crc kubenswrapper[4943]: I1129 07:12:20.035047 4943 generic.go:334] "Generic (PLEG): container finished" podID="2136019f-116e-4d59-8187-ddffe47807b5" containerID="e0024c11bcf356bf3e610c3006da72054bc3ff482fcaeb5e291233a461671ebd" exitCode=0 Nov 29 07:12:20 crc kubenswrapper[4943]: I1129 07:12:20.035148 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-jrkxk" event={"ID":"2136019f-116e-4d59-8187-ddffe47807b5","Type":"ContainerDied","Data":"e0024c11bcf356bf3e610c3006da72054bc3ff482fcaeb5e291233a461671ebd"} Nov 29 07:12:20 crc kubenswrapper[4943]: I1129 07:12:20.037357 4943 generic.go:334] "Generic (PLEG): container finished" podID="9b338485-63e2-4d8b-bb25-3b37aa40d138" containerID="9649aee915343f4875d096547dbec206b902f42010dd20035ba3fadbdc2c0268" exitCode=0 Nov 29 07:12:20 crc kubenswrapper[4943]: I1129 07:12:20.037403 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mkjph" event={"ID":"9b338485-63e2-4d8b-bb25-3b37aa40d138","Type":"ContainerDied","Data":"9649aee915343f4875d096547dbec206b902f42010dd20035ba3fadbdc2c0268"} Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.056446 4943 generic.go:334] "Generic (PLEG): container finished" podID="3efb75d6-355a-4ec4-9032-1d23890dcf5e" containerID="f2c96dca5fa6b3d1c87c32374f169e4bf3a0d16922f1714d254a2d13e981f232" exitCode=0 Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.056611 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-195a-account-create-update-cthml" event={"ID":"3efb75d6-355a-4ec4-9032-1d23890dcf5e","Type":"ContainerDied","Data":"f2c96dca5fa6b3d1c87c32374f169e4bf3a0d16922f1714d254a2d13e981f232"} Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.058486 4943 generic.go:334] "Generic (PLEG): container finished" podID="a28df764-ae07-442a-8786-6941476ba033" containerID="2dc7af98f42cb30a1c28f09495252b8eaa53c2cfad023af9d90ac47c523d7bdf" exitCode=0 Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.058587 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" 
event={"ID":"a28df764-ae07-442a-8786-6941476ba033","Type":"ContainerDied","Data":"2dc7af98f42cb30a1c28f09495252b8eaa53c2cfad023af9d90ac47c523d7bdf"} Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.062726 4943 generic.go:334] "Generic (PLEG): container finished" podID="923cc7a6-0fd5-44c0-a568-88eeccc8f31e" containerID="6f6c601d2ad15d893b0eb2bcf658e32f1935e1ae81528cf262bc71375b1b5908" exitCode=0 Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.063332 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-99dmd" event={"ID":"923cc7a6-0fd5-44c0-a568-88eeccc8f31e","Type":"ContainerDied","Data":"6f6c601d2ad15d893b0eb2bcf658e32f1935e1ae81528cf262bc71375b1b5908"} Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.476584 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.482460 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.507383 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b338485-63e2-4d8b-bb25-3b37aa40d138-operator-scripts\") pod \"9b338485-63e2-4d8b-bb25-3b37aa40d138\" (UID: \"9b338485-63e2-4d8b-bb25-3b37aa40d138\") " Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.507537 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2g46\" (UniqueName: \"kubernetes.io/projected/9b338485-63e2-4d8b-bb25-3b37aa40d138-kube-api-access-b2g46\") pod \"9b338485-63e2-4d8b-bb25-3b37aa40d138\" (UID: \"9b338485-63e2-4d8b-bb25-3b37aa40d138\") " Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.507608 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2136019f-116e-4d59-8187-ddffe47807b5-operator-scripts\") pod \"2136019f-116e-4d59-8187-ddffe47807b5\" (UID: \"2136019f-116e-4d59-8187-ddffe47807b5\") " Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.507627 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fl2vw\" (UniqueName: \"kubernetes.io/projected/2136019f-116e-4d59-8187-ddffe47807b5-kube-api-access-fl2vw\") pod \"2136019f-116e-4d59-8187-ddffe47807b5\" (UID: \"2136019f-116e-4d59-8187-ddffe47807b5\") " Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.508234 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2136019f-116e-4d59-8187-ddffe47807b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2136019f-116e-4d59-8187-ddffe47807b5" (UID: "2136019f-116e-4d59-8187-ddffe47807b5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.508412 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b338485-63e2-4d8b-bb25-3b37aa40d138-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9b338485-63e2-4d8b-bb25-3b37aa40d138" (UID: "9b338485-63e2-4d8b-bb25-3b37aa40d138"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.517925 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2136019f-116e-4d59-8187-ddffe47807b5-kube-api-access-fl2vw" (OuterVolumeSpecName: "kube-api-access-fl2vw") pod "2136019f-116e-4d59-8187-ddffe47807b5" (UID: "2136019f-116e-4d59-8187-ddffe47807b5"). InnerVolumeSpecName "kube-api-access-fl2vw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.522869 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b338485-63e2-4d8b-bb25-3b37aa40d138-kube-api-access-b2g46" (OuterVolumeSpecName: "kube-api-access-b2g46") pod "9b338485-63e2-4d8b-bb25-3b37aa40d138" (UID: "9b338485-63e2-4d8b-bb25-3b37aa40d138"). InnerVolumeSpecName "kube-api-access-b2g46". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.609485 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b338485-63e2-4d8b-bb25-3b37aa40d138-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.609520 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2g46\" (UniqueName: \"kubernetes.io/projected/9b338485-63e2-4d8b-bb25-3b37aa40d138-kube-api-access-b2g46\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.609532 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2136019f-116e-4d59-8187-ddffe47807b5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:21 crc kubenswrapper[4943]: I1129 07:12:21.609540 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fl2vw\" (UniqueName: \"kubernetes.io/projected/2136019f-116e-4d59-8187-ddffe47807b5-kube-api-access-fl2vw\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.071797 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mkjph" event={"ID":"9b338485-63e2-4d8b-bb25-3b37aa40d138","Type":"ContainerDied","Data":"3e871ee82948f38c31afacd4b658a1e3d96a9bd345c5d8e13601e653e84be2c3"} Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.071834 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e871ee82948f38c31afacd4b658a1e3d96a9bd345c5d8e13601e653e84be2c3" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.071891 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mkjph" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.074179 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-jrkxk" event={"ID":"2136019f-116e-4d59-8187-ddffe47807b5","Type":"ContainerDied","Data":"31b38086762c4b06084aa88d0120f69ecd59b204c265de342273c3358212231d"} Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.074283 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31b38086762c4b06084aa88d0120f69ecd59b204c265de342273c3358212231d" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.074437 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-jrkxk" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.526605 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.534817 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-99dmd" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.544114 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625440 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5rgz\" (UniqueName: \"kubernetes.io/projected/a28df764-ae07-442a-8786-6941476ba033-kube-api-access-q5rgz\") pod \"a28df764-ae07-442a-8786-6941476ba033\" (UID: \"a28df764-ae07-442a-8786-6941476ba033\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625487 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-combined-ca-bundle\") pod \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625511 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a28df764-ae07-442a-8786-6941476ba033-operator-scripts\") pod \"a28df764-ae07-442a-8786-6941476ba033\" (UID: \"a28df764-ae07-442a-8786-6941476ba033\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625539 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-logs\") pod \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625746 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-config-data\") pod \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625778 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3efb75d6-355a-4ec4-9032-1d23890dcf5e-operator-scripts\") pod \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\" (UID: \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625799 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjm8q\" (UniqueName: \"kubernetes.io/projected/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-kube-api-access-mjm8q\") pod \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625817 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-scripts\") pod \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\" (UID: \"923cc7a6-0fd5-44c0-a568-88eeccc8f31e\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.625843 4943 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkzsm\" (UniqueName: \"kubernetes.io/projected/3efb75d6-355a-4ec4-9032-1d23890dcf5e-kube-api-access-bkzsm\") pod \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\" (UID: \"3efb75d6-355a-4ec4-9032-1d23890dcf5e\") " Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.626102 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-logs" (OuterVolumeSpecName: "logs") pod "923cc7a6-0fd5-44c0-a568-88eeccc8f31e" (UID: "923cc7a6-0fd5-44c0-a568-88eeccc8f31e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.626498 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3efb75d6-355a-4ec4-9032-1d23890dcf5e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3efb75d6-355a-4ec4-9032-1d23890dcf5e" (UID: "3efb75d6-355a-4ec4-9032-1d23890dcf5e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.626662 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a28df764-ae07-442a-8786-6941476ba033-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a28df764-ae07-442a-8786-6941476ba033" (UID: "a28df764-ae07-442a-8786-6941476ba033"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.631438 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a28df764-ae07-442a-8786-6941476ba033-kube-api-access-q5rgz" (OuterVolumeSpecName: "kube-api-access-q5rgz") pod "a28df764-ae07-442a-8786-6941476ba033" (UID: "a28df764-ae07-442a-8786-6941476ba033"). InnerVolumeSpecName "kube-api-access-q5rgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.631713 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-kube-api-access-mjm8q" (OuterVolumeSpecName: "kube-api-access-mjm8q") pod "923cc7a6-0fd5-44c0-a568-88eeccc8f31e" (UID: "923cc7a6-0fd5-44c0-a568-88eeccc8f31e"). InnerVolumeSpecName "kube-api-access-mjm8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.631957 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3efb75d6-355a-4ec4-9032-1d23890dcf5e-kube-api-access-bkzsm" (OuterVolumeSpecName: "kube-api-access-bkzsm") pod "3efb75d6-355a-4ec4-9032-1d23890dcf5e" (UID: "3efb75d6-355a-4ec4-9032-1d23890dcf5e"). InnerVolumeSpecName "kube-api-access-bkzsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.632288 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-scripts" (OuterVolumeSpecName: "scripts") pod "923cc7a6-0fd5-44c0-a568-88eeccc8f31e" (UID: "923cc7a6-0fd5-44c0-a568-88eeccc8f31e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.652570 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-config-data" (OuterVolumeSpecName: "config-data") pod "923cc7a6-0fd5-44c0-a568-88eeccc8f31e" (UID: "923cc7a6-0fd5-44c0-a568-88eeccc8f31e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.653115 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "923cc7a6-0fd5-44c0-a568-88eeccc8f31e" (UID: "923cc7a6-0fd5-44c0-a568-88eeccc8f31e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727604 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727637 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3efb75d6-355a-4ec4-9032-1d23890dcf5e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727647 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjm8q\" (UniqueName: \"kubernetes.io/projected/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-kube-api-access-mjm8q\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727655 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727691 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkzsm\" (UniqueName: \"kubernetes.io/projected/3efb75d6-355a-4ec4-9032-1d23890dcf5e-kube-api-access-bkzsm\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727700 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5rgz\" (UniqueName: \"kubernetes.io/projected/a28df764-ae07-442a-8786-6941476ba033-kube-api-access-q5rgz\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727709 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727717 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a28df764-ae07-442a-8786-6941476ba033-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:22 crc kubenswrapper[4943]: I1129 07:12:22.727730 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/923cc7a6-0fd5-44c0-a568-88eeccc8f31e-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.086412 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" 
event={"ID":"a28df764-ae07-442a-8786-6941476ba033","Type":"ContainerDied","Data":"8bcb93062c46ac568edf805e1e08c949a012e21b62957a6117337ccff1abe59b"} Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.086454 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8bcb93062c46ac568edf805e1e08c949a012e21b62957a6117337ccff1abe59b" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.086457 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-0721-account-create-update-v4z8w" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.088402 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-99dmd" event={"ID":"923cc7a6-0fd5-44c0-a568-88eeccc8f31e","Type":"ContainerDied","Data":"1fde00aa667be0c8379a2840df2b9d358ceda5bfad38acd879d2f73c74831373"} Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.088426 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fde00aa667be0c8379a2840df2b9d358ceda5bfad38acd879d2f73c74831373" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.088459 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-99dmd" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.089831 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-195a-account-create-update-cthml" event={"ID":"3efb75d6-355a-4ec4-9032-1d23890dcf5e","Type":"ContainerDied","Data":"72b0fafc7d3f69f3f7f3bedb38742a53b74bad967523d32c25b47e2588a4e614"} Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.089871 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72b0fafc7d3f69f3f7f3bedb38742a53b74bad967523d32c25b47e2588a4e614" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.089926 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-195a-account-create-update-cthml" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278101 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-654bfbd6d-pmg6n"] Nov 29 07:12:23 crc kubenswrapper[4943]: E1129 07:12:23.278474 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28df764-ae07-442a-8786-6941476ba033" containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278496 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28df764-ae07-442a-8786-6941476ba033" containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: E1129 07:12:23.278516 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3efb75d6-355a-4ec4-9032-1d23890dcf5e" containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278522 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3efb75d6-355a-4ec4-9032-1d23890dcf5e" containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: E1129 07:12:23.278531 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f108cd69-36fb-406c-9038-c690b15eaa26" containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278539 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f108cd69-36fb-406c-9038-c690b15eaa26" containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: E1129 07:12:23.278552 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="923cc7a6-0fd5-44c0-a568-88eeccc8f31e" containerName="placement-db-sync" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278575 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="923cc7a6-0fd5-44c0-a568-88eeccc8f31e" containerName="placement-db-sync" Nov 29 07:12:23 crc kubenswrapper[4943]: E1129 07:12:23.278594 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="119259aa-fc0c-4ed0-bcd8-402b1016bea6" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278600 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="119259aa-fc0c-4ed0-bcd8-402b1016bea6" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: E1129 07:12:23.278610 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2136019f-116e-4d59-8187-ddffe47807b5" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278615 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2136019f-116e-4d59-8187-ddffe47807b5" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: E1129 07:12:23.278624 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b338485-63e2-4d8b-bb25-3b37aa40d138" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278630 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b338485-63e2-4d8b-bb25-3b37aa40d138" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278790 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a28df764-ae07-442a-8786-6941476ba033" containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278809 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3efb75d6-355a-4ec4-9032-1d23890dcf5e" 
containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278819 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="923cc7a6-0fd5-44c0-a568-88eeccc8f31e" containerName="placement-db-sync" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278827 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b338485-63e2-4d8b-bb25-3b37aa40d138" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278834 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2136019f-116e-4d59-8187-ddffe47807b5" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278841 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f108cd69-36fb-406c-9038-c690b15eaa26" containerName="mariadb-account-create-update" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.278849 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="119259aa-fc0c-4ed0-bcd8-402b1016bea6" containerName="mariadb-database-create" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.279690 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.290613 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.290758 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.290759 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.291062 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.296630 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-5xsnq" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.299465 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-654bfbd6d-pmg6n"] Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.337388 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-public-tls-certs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.337496 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-config-data\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.337529 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-internal-tls-certs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 
07:12:23.337555 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-scripts\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.337699 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-logs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.337727 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-combined-ca-bundle\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.337764 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkzz6\" (UniqueName: \"kubernetes.io/projected/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-kube-api-access-xkzz6\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.439669 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-public-tls-certs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.439813 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-config-data\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.439846 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-internal-tls-certs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.440335 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-scripts\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.440909 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-logs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.440986 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-combined-ca-bundle\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.441059 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkzz6\" (UniqueName: \"kubernetes.io/projected/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-kube-api-access-xkzz6\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.441250 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-logs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.443307 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-scripts\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.443408 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-config-data\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.443685 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-combined-ca-bundle\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.444148 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-internal-tls-certs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.445157 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-public-tls-certs\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.458089 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkzz6\" (UniqueName: \"kubernetes.io/projected/b3eec5fd-1275-4ea7-bd28-9e468ebd2e41-kube-api-access-xkzz6\") pod \"placement-654bfbd6d-pmg6n\" (UID: \"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41\") " pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:23 crc kubenswrapper[4943]: I1129 07:12:23.594694 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.035069 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-654bfbd6d-pmg6n"] Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.107732 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-654bfbd6d-pmg6n" event={"ID":"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41","Type":"ContainerStarted","Data":"d27ac920ab590edde91448571d212c94f86bc64de8ebb2a0d44b499370241443"} Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.601181 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ws7ms"] Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.602909 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.605403 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-2hxgx" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.605643 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.605990 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.611459 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ws7ms"] Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.661043 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-scripts\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.661340 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.661407 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgxs2\" (UniqueName: \"kubernetes.io/projected/68a1344d-4cce-4597-894c-f167c42efe84-kube-api-access-vgxs2\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.661668 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-config-data\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.763483 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-scripts\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: 
\"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.763734 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.763769 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgxs2\" (UniqueName: \"kubernetes.io/projected/68a1344d-4cce-4597-894c-f167c42efe84-kube-api-access-vgxs2\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.763855 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-config-data\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.769167 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.774432 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-config-data\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.775977 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-scripts\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.786223 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgxs2\" (UniqueName: \"kubernetes.io/projected/68a1344d-4cce-4597-894c-f167c42efe84-kube-api-access-vgxs2\") pod \"nova-cell0-conductor-db-sync-ws7ms\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:24 crc kubenswrapper[4943]: I1129 07:12:24.923347 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:12:25 crc kubenswrapper[4943]: I1129 07:12:25.117093 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-654bfbd6d-pmg6n" event={"ID":"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41","Type":"ContainerStarted","Data":"59d00a29377c89dd0aa5682c5b76ff963010db0d16b84c17a6b78e1f77ea51a0"} Nov 29 07:12:25 crc kubenswrapper[4943]: I1129 07:12:25.117144 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-654bfbd6d-pmg6n" event={"ID":"b3eec5fd-1275-4ea7-bd28-9e468ebd2e41","Type":"ContainerStarted","Data":"5de53a0f3561afe830271115fec08ccc92b2ecea714621252c1e7b7fec0225cf"} Nov 29 07:12:25 crc kubenswrapper[4943]: I1129 07:12:25.117223 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:25 crc kubenswrapper[4943]: I1129 07:12:25.117310 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:12:25 crc kubenswrapper[4943]: I1129 07:12:25.142656 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-654bfbd6d-pmg6n" podStartSLOduration=2.142635003 podStartE2EDuration="2.142635003s" podCreationTimestamp="2025-11-29 07:12:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:25.140602314 +0000 UTC m=+2320.070691077" watchObservedRunningTime="2025-11-29 07:12:25.142635003 +0000 UTC m=+2320.072723756" Nov 29 07:12:25 crc kubenswrapper[4943]: I1129 07:12:25.401474 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ws7ms"] Nov 29 07:12:25 crc kubenswrapper[4943]: W1129 07:12:25.407342 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68a1344d_4cce_4597_894c_f167c42efe84.slice/crio-bb0fb3944b85cb97e1325f4eda46cd2412a3d58176ae685103771d3e18946cf6 WatchSource:0}: Error finding container bb0fb3944b85cb97e1325f4eda46cd2412a3d58176ae685103771d3e18946cf6: Status 404 returned error can't find the container with id bb0fb3944b85cb97e1325f4eda46cd2412a3d58176ae685103771d3e18946cf6 Nov 29 07:12:25 crc kubenswrapper[4943]: I1129 07:12:25.410959 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:12:26 crc kubenswrapper[4943]: I1129 07:12:26.128002 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" event={"ID":"68a1344d-4cce-4597-894c-f167c42efe84","Type":"ContainerStarted","Data":"bb0fb3944b85cb97e1325f4eda46cd2412a3d58176ae685103771d3e18946cf6"} Nov 29 07:12:28 crc kubenswrapper[4943]: I1129 07:12:28.153165 4943 generic.go:334] "Generic (PLEG): container finished" podID="00a997b6-77fe-4644-8034-6a35b7518421" containerID="a1cdb08a04fe78ea7a9ec26fdf44580ba70fd66c8040c2f7e07a655e73668168" exitCode=0 Nov 29 07:12:28 crc kubenswrapper[4943]: I1129 07:12:28.153345 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dnmc7" event={"ID":"00a997b6-77fe-4644-8034-6a35b7518421","Type":"ContainerDied","Data":"a1cdb08a04fe78ea7a9ec26fdf44580ba70fd66c8040c2f7e07a655e73668168"} Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.634725 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.762643 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-db-sync-config-data\") pod \"00a997b6-77fe-4644-8034-6a35b7518421\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.762690 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-combined-ca-bundle\") pod \"00a997b6-77fe-4644-8034-6a35b7518421\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.762761 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ffx2\" (UniqueName: \"kubernetes.io/projected/00a997b6-77fe-4644-8034-6a35b7518421-kube-api-access-4ffx2\") pod \"00a997b6-77fe-4644-8034-6a35b7518421\" (UID: \"00a997b6-77fe-4644-8034-6a35b7518421\") " Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.769174 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00a997b6-77fe-4644-8034-6a35b7518421-kube-api-access-4ffx2" (OuterVolumeSpecName: "kube-api-access-4ffx2") pod "00a997b6-77fe-4644-8034-6a35b7518421" (UID: "00a997b6-77fe-4644-8034-6a35b7518421"). InnerVolumeSpecName "kube-api-access-4ffx2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.770166 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "00a997b6-77fe-4644-8034-6a35b7518421" (UID: "00a997b6-77fe-4644-8034-6a35b7518421"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.790962 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00a997b6-77fe-4644-8034-6a35b7518421" (UID: "00a997b6-77fe-4644-8034-6a35b7518421"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.865227 4943 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.865256 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00a997b6-77fe-4644-8034-6a35b7518421-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:33 crc kubenswrapper[4943]: I1129 07:12:33.865265 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ffx2\" (UniqueName: \"kubernetes.io/projected/00a997b6-77fe-4644-8034-6a35b7518421-kube-api-access-4ffx2\") on node \"crc\" DevicePath \"\"" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.217693 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dnmc7" event={"ID":"00a997b6-77fe-4644-8034-6a35b7518421","Type":"ContainerDied","Data":"f2ecf5d0add55eb21f8f18cf4da9cc008de2c94661b8cd4acc84529ca7122af9"} Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.217738 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2ecf5d0add55eb21f8f18cf4da9cc008de2c94661b8cd4acc84529ca7122af9" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.217749 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-dnmc7" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.933402 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-65f55968c5-wz8zv"] Nov 29 07:12:34 crc kubenswrapper[4943]: E1129 07:12:34.936182 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00a997b6-77fe-4644-8034-6a35b7518421" containerName="barbican-db-sync" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.936201 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="00a997b6-77fe-4644-8034-6a35b7518421" containerName="barbican-db-sync" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.936398 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="00a997b6-77fe-4644-8034-6a35b7518421" containerName="barbican-db-sync" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.937351 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.943152 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.943381 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-v9j9p" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.944270 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.953729 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5666c9f9fb-m5bqf"] Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.955303 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.959822 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.972412 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-65f55968c5-wz8zv"] Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.986980 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5666c9f9fb-m5bqf"] Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.992287 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-config-data\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.992351 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09f8aed7-018b-4cc9-aead-9c93d1863e10-logs\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.992387 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlt2v\" (UniqueName: \"kubernetes.io/projected/09f8aed7-018b-4cc9-aead-9c93d1863e10-kube-api-access-jlt2v\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.992444 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-config-data-custom\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:34 crc kubenswrapper[4943]: I1129 07:12:34.992482 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-combined-ca-bundle\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.042614 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c55bf9497-p8xzj"] Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.044009 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.067237 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c55bf9497-p8xzj"] Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.094661 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-combined-ca-bundle\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.094963 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-config-data\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.095098 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09f8aed7-018b-4cc9-aead-9c93d1863e10-logs\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.095188 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlt2v\" (UniqueName: \"kubernetes.io/projected/09f8aed7-018b-4cc9-aead-9c93d1863e10-kube-api-access-jlt2v\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.095298 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-logs\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.095421 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-config\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.095576 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-config-data-custom\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.095713 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-combined-ca-bundle\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.095865 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-dns-svc\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.097138 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09f8aed7-018b-4cc9-aead-9c93d1863e10-logs\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.097268 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-nb\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.097287 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-sb\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.097369 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4chs\" (UniqueName: \"kubernetes.io/projected/b7250ef3-7d67-4e63-bd25-ab511d058ee7-kube-api-access-k4chs\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.097423 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xprq4\" (UniqueName: \"kubernetes.io/projected/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-kube-api-access-xprq4\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.097487 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-config-data-custom\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.097526 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-config-data\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.102042 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-combined-ca-bundle\") pod \"barbican-worker-65f55968c5-wz8zv\" 
(UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.104752 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-config-data\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.104848 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09f8aed7-018b-4cc9-aead-9c93d1863e10-config-data-custom\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.116987 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlt2v\" (UniqueName: \"kubernetes.io/projected/09f8aed7-018b-4cc9-aead-9c93d1863e10-kube-api-access-jlt2v\") pod \"barbican-worker-65f55968c5-wz8zv\" (UID: \"09f8aed7-018b-4cc9-aead-9c93d1863e10\") " pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.181200 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-56d48b4698-6bggx"] Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.182576 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.185325 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.194132 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56d48b4698-6bggx"] Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199256 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-dns-svc\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199331 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-nb\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199360 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-sb\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199398 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4chs\" (UniqueName: \"kubernetes.io/projected/b7250ef3-7d67-4e63-bd25-ab511d058ee7-kube-api-access-k4chs\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: 
I1129 07:12:35.199427 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xprq4\" (UniqueName: \"kubernetes.io/projected/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-kube-api-access-xprq4\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199457 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-config-data-custom\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199488 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-config-data\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199523 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-combined-ca-bundle\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199584 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-logs\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.199607 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-config\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.202499 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-logs\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.203255 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-sb\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.203255 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-dns-svc\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " 
pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.203987 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-config\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.204945 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-config-data-custom\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.206834 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-nb\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.208496 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-config-data\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.210628 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-combined-ca-bundle\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.225590 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4chs\" (UniqueName: \"kubernetes.io/projected/b7250ef3-7d67-4e63-bd25-ab511d058ee7-kube-api-access-k4chs\") pod \"dnsmasq-dns-7c55bf9497-p8xzj\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.228073 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xprq4\" (UniqueName: \"kubernetes.io/projected/9e6b1af0-828e-4bc6-afc5-ae9728bf0f62-kube-api-access-xprq4\") pod \"barbican-keystone-listener-5666c9f9fb-m5bqf\" (UID: \"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62\") " pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.261555 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-65f55968c5-wz8zv" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.294079 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.301700 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-combined-ca-bundle\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.301778 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data-custom\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.301863 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d98f919d-982c-41ed-92f0-4ad5668530ef-logs\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.302009 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.302093 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmckl\" (UniqueName: \"kubernetes.io/projected/d98f919d-982c-41ed-92f0-4ad5668530ef-kube-api-access-jmckl\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.373297 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.403369 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmckl\" (UniqueName: \"kubernetes.io/projected/d98f919d-982c-41ed-92f0-4ad5668530ef-kube-api-access-jmckl\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.403448 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-combined-ca-bundle\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.403475 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data-custom\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.403507 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d98f919d-982c-41ed-92f0-4ad5668530ef-logs\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.403627 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.404200 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d98f919d-982c-41ed-92f0-4ad5668530ef-logs\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.407680 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data-custom\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.407817 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.408118 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-combined-ca-bundle\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc 
kubenswrapper[4943]: I1129 07:12:35.421598 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmckl\" (UniqueName: \"kubernetes.io/projected/d98f919d-982c-41ed-92f0-4ad5668530ef-kube-api-access-jmckl\") pod \"barbican-api-56d48b4698-6bggx\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:35 crc kubenswrapper[4943]: I1129 07:12:35.579353 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:37 crc kubenswrapper[4943]: I1129 07:12:37.942738 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5648447994-lm7pg"] Nov 29 07:12:37 crc kubenswrapper[4943]: I1129 07:12:37.944979 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:37 crc kubenswrapper[4943]: I1129 07:12:37.948838 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 29 07:12:37 crc kubenswrapper[4943]: I1129 07:12:37.949084 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 29 07:12:37 crc kubenswrapper[4943]: I1129 07:12:37.963651 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5648447994-lm7pg"] Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.050289 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29b74\" (UniqueName: \"kubernetes.io/projected/444ebf94-f3eb-4f21-9a69-7730d465c3b6-kube-api-access-29b74\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.050383 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-config-data-custom\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.050410 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-combined-ca-bundle\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.050437 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-config-data\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.050463 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-public-tls-certs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.050500 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-internal-tls-certs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.050534 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/444ebf94-f3eb-4f21-9a69-7730d465c3b6-logs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.151716 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-internal-tls-certs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.151783 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/444ebf94-f3eb-4f21-9a69-7730d465c3b6-logs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.151901 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29b74\" (UniqueName: \"kubernetes.io/projected/444ebf94-f3eb-4f21-9a69-7730d465c3b6-kube-api-access-29b74\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.151948 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-config-data-custom\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.151973 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-combined-ca-bundle\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.151992 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-config-data\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.152011 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-public-tls-certs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.152337 4943 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/444ebf94-f3eb-4f21-9a69-7730d465c3b6-logs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.157506 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-combined-ca-bundle\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.158092 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-internal-tls-certs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.158939 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-config-data\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.159060 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-public-tls-certs\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.160125 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/444ebf94-f3eb-4f21-9a69-7730d465c3b6-config-data-custom\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.169898 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29b74\" (UniqueName: \"kubernetes.io/projected/444ebf94-f3eb-4f21-9a69-7730d465c3b6-kube-api-access-29b74\") pod \"barbican-api-5648447994-lm7pg\" (UID: \"444ebf94-f3eb-4f21-9a69-7730d465c3b6\") " pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.265304 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.835249 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5648447994-lm7pg"] Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.949947 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c55bf9497-p8xzj"] Nov 29 07:12:38 crc kubenswrapper[4943]: W1129 07:12:38.951304 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd98f919d_982c_41ed_92f0_4ad5668530ef.slice/crio-e0a47cdcaa4b6462685cbc89f59a20cf58a84924b160de83138b9f0463edfc86 WatchSource:0}: Error finding container e0a47cdcaa4b6462685cbc89f59a20cf58a84924b160de83138b9f0463edfc86: Status 404 returned error can't find the container with id e0a47cdcaa4b6462685cbc89f59a20cf58a84924b160de83138b9f0463edfc86 Nov 29 07:12:38 crc kubenswrapper[4943]: W1129 07:12:38.951534 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7250ef3_7d67_4e63_bd25_ab511d058ee7.slice/crio-29183efa9c08ed2702a3c3565a7a2b369a3ef0b8da891980699bc21e1e8f2693 WatchSource:0}: Error finding container 29183efa9c08ed2702a3c3565a7a2b369a3ef0b8da891980699bc21e1e8f2693: Status 404 returned error can't find the container with id 29183efa9c08ed2702a3c3565a7a2b369a3ef0b8da891980699bc21e1e8f2693 Nov 29 07:12:38 crc kubenswrapper[4943]: I1129 07:12:38.964292 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56d48b4698-6bggx"] Nov 29 07:12:39 crc kubenswrapper[4943]: I1129 07:12:39.100007 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-65f55968c5-wz8zv"] Nov 29 07:12:39 crc kubenswrapper[4943]: W1129 07:12:39.107057 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09f8aed7_018b_4cc9_aead_9c93d1863e10.slice/crio-4b726f7f18ccee554313fcb81e7834fc74731044b348ab56f9433be2f4b14de8 WatchSource:0}: Error finding container 4b726f7f18ccee554313fcb81e7834fc74731044b348ab56f9433be2f4b14de8: Status 404 returned error can't find the container with id 4b726f7f18ccee554313fcb81e7834fc74731044b348ab56f9433be2f4b14de8 Nov 29 07:12:39 crc kubenswrapper[4943]: I1129 07:12:39.110578 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5666c9f9fb-m5bqf"] Nov 29 07:12:39 crc kubenswrapper[4943]: W1129 07:12:39.112549 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e6b1af0_828e_4bc6_afc5_ae9728bf0f62.slice/crio-cec64bfdb77866b7205bbcea8f491d6079942c52175fa4c35c0d486b9299af36 WatchSource:0}: Error finding container cec64bfdb77866b7205bbcea8f491d6079942c52175fa4c35c0d486b9299af36: Status 404 returned error can't find the container with id cec64bfdb77866b7205bbcea8f491d6079942c52175fa4c35c0d486b9299af36 Nov 29 07:12:39 crc kubenswrapper[4943]: I1129 07:12:39.266109 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" event={"ID":"68a1344d-4cce-4597-894c-f167c42efe84","Type":"ContainerStarted","Data":"391dce06ed9d044c50a78e5dfba711255d7f190bb557f7d231cee40b8757f07c"} Nov 29 07:12:39 crc kubenswrapper[4943]: I1129 07:12:39.267848 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-5648447994-lm7pg" event={"ID":"444ebf94-f3eb-4f21-9a69-7730d465c3b6","Type":"ContainerStarted","Data":"60d5f6b68bffe68045303fcaf248ba0d0e469d0c97273e76728a76db8137b05f"} Nov 29 07:12:39 crc kubenswrapper[4943]: I1129 07:12:39.269224 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-65f55968c5-wz8zv" event={"ID":"09f8aed7-018b-4cc9-aead-9c93d1863e10","Type":"ContainerStarted","Data":"4b726f7f18ccee554313fcb81e7834fc74731044b348ab56f9433be2f4b14de8"} Nov 29 07:12:39 crc kubenswrapper[4943]: I1129 07:12:39.271136 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56d48b4698-6bggx" event={"ID":"d98f919d-982c-41ed-92f0-4ad5668530ef","Type":"ContainerStarted","Data":"e0a47cdcaa4b6462685cbc89f59a20cf58a84924b160de83138b9f0463edfc86"} Nov 29 07:12:39 crc kubenswrapper[4943]: I1129 07:12:39.272512 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" event={"ID":"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62","Type":"ContainerStarted","Data":"cec64bfdb77866b7205bbcea8f491d6079942c52175fa4c35c0d486b9299af36"} Nov 29 07:12:39 crc kubenswrapper[4943]: I1129 07:12:39.275012 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" event={"ID":"b7250ef3-7d67-4e63-bd25-ab511d058ee7","Type":"ContainerStarted","Data":"29183efa9c08ed2702a3c3565a7a2b369a3ef0b8da891980699bc21e1e8f2693"} Nov 29 07:12:41 crc kubenswrapper[4943]: I1129 07:12:41.297782 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5648447994-lm7pg" event={"ID":"444ebf94-f3eb-4f21-9a69-7730d465c3b6","Type":"ContainerStarted","Data":"db014994aff58126784c650fc9a8333e4653d1c8b92e633309b9ece87cc59020"} Nov 29 07:12:42 crc kubenswrapper[4943]: I1129 07:12:42.317296 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56d48b4698-6bggx" event={"ID":"d98f919d-982c-41ed-92f0-4ad5668530ef","Type":"ContainerStarted","Data":"cb2652f2915562864f87f8398dd1432f26081df65d29ed173d744fc9fd5739ed"} Nov 29 07:12:42 crc kubenswrapper[4943]: I1129 07:12:42.319310 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" event={"ID":"b7250ef3-7d67-4e63-bd25-ab511d058ee7","Type":"ContainerStarted","Data":"1f5f7fb2c6b73fea2d9b60237a42ff191ec6322d4d44afd1b1ebdee35af2ab2d"} Nov 29 07:12:43 crc kubenswrapper[4943]: I1129 07:12:43.328660 4943 generic.go:334] "Generic (PLEG): container finished" podID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerID="1f5f7fb2c6b73fea2d9b60237a42ff191ec6322d4d44afd1b1ebdee35af2ab2d" exitCode=0 Nov 29 07:12:43 crc kubenswrapper[4943]: I1129 07:12:43.337102 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" event={"ID":"b7250ef3-7d67-4e63-bd25-ab511d058ee7","Type":"ContainerDied","Data":"1f5f7fb2c6b73fea2d9b60237a42ff191ec6322d4d44afd1b1ebdee35af2ab2d"} Nov 29 07:12:43 crc kubenswrapper[4943]: I1129 07:12:43.377702 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" podStartSLOduration=6.35252382 podStartE2EDuration="19.377680959s" podCreationTimestamp="2025-11-29 07:12:24 +0000 UTC" firstStartedPulling="2025-11-29 07:12:25.410633724 +0000 UTC m=+2320.340722477" lastFinishedPulling="2025-11-29 07:12:38.435790853 +0000 UTC m=+2333.365879616" observedRunningTime="2025-11-29 07:12:43.374787428 +0000 UTC m=+2338.304876191" 
watchObservedRunningTime="2025-11-29 07:12:43.377680959 +0000 UTC m=+2338.307769712" Nov 29 07:12:45 crc kubenswrapper[4943]: I1129 07:12:45.349406 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5648447994-lm7pg" event={"ID":"444ebf94-f3eb-4f21-9a69-7730d465c3b6","Type":"ContainerStarted","Data":"4d0919d3306dcfacc8b3dbac3cbf0837ec47286e2e89c759470ff8aa7b6eab40"} Nov 29 07:12:46 crc kubenswrapper[4943]: I1129 07:12:46.360879 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56d48b4698-6bggx" event={"ID":"d98f919d-982c-41ed-92f0-4ad5668530ef","Type":"ContainerStarted","Data":"fa49c532a40fbcc90c5bde1f0bc53291b92d285d35d7b79673c004b543f2fbe3"} Nov 29 07:12:49 crc kubenswrapper[4943]: I1129 07:12:49.398865 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" event={"ID":"b7250ef3-7d67-4e63-bd25-ab511d058ee7","Type":"ContainerStarted","Data":"a363585a07ce3b1222b542c528d329a0bfa56a70d09a494b5ec4c2f8797488fb"} Nov 29 07:12:49 crc kubenswrapper[4943]: I1129 07:12:49.399927 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:49 crc kubenswrapper[4943]: I1129 07:12:49.423667 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5648447994-lm7pg" podStartSLOduration=12.423633048 podStartE2EDuration="12.423633048s" podCreationTimestamp="2025-11-29 07:12:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:49.419465465 +0000 UTC m=+2344.349554248" watchObservedRunningTime="2025-11-29 07:12:49.423633048 +0000 UTC m=+2344.353721801" Nov 29 07:12:49 crc kubenswrapper[4943]: I1129 07:12:49.449493 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" podStartSLOduration=15.449463146 podStartE2EDuration="15.449463146s" podCreationTimestamp="2025-11-29 07:12:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:49.439497909 +0000 UTC m=+2344.369586682" watchObservedRunningTime="2025-11-29 07:12:49.449463146 +0000 UTC m=+2344.379551899" Nov 29 07:12:49 crc kubenswrapper[4943]: I1129 07:12:49.469249 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-56d48b4698-6bggx" podStartSLOduration=14.469221493 podStartE2EDuration="14.469221493s" podCreationTimestamp="2025-11-29 07:12:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:12:49.464903907 +0000 UTC m=+2344.394992680" watchObservedRunningTime="2025-11-29 07:12:49.469221493 +0000 UTC m=+2344.399310246" Nov 29 07:12:50 crc kubenswrapper[4943]: I1129 07:12:50.374030 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:50 crc kubenswrapper[4943]: I1129 07:12:50.416508 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:12:50 crc kubenswrapper[4943]: I1129 07:12:50.579997 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:50 crc kubenswrapper[4943]: I1129 07:12:50.580042 4943 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:12:51 crc kubenswrapper[4943]: I1129 07:12:51.424299 4943 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 07:12:54 crc kubenswrapper[4943]: I1129 07:12:54.406812 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5648447994-lm7pg" podUID="444ebf94-f3eb-4f21-9a69-7730d465c3b6" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.274006 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5648447994-lm7pg" podUID="444ebf94-f3eb-4f21-9a69-7730d465c3b6" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.274018 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5648447994-lm7pg" podUID="444ebf94-f3eb-4f21-9a69-7730d465c3b6" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.375465 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.420822 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5648447994-lm7pg" podUID="444ebf94-f3eb-4f21-9a69-7730d465c3b6" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.437145 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-745b9ddc8c-gq7tj"] Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.437612 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" podUID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerName="dnsmasq-dns" containerID="cri-o://15e64e0735b30046273a8fdc4062d174f393e16f5d2556c587d60525229e691b" gracePeriod=10 Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.701716 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" podUID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.140:5353: connect: connection refused" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.743788 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.743829 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" probeResult="failure" output="Get 
\"http://10.217.0.157:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.743793 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:55 crc kubenswrapper[4943]: I1129 07:12:55.743796 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:59 crc kubenswrapper[4943]: E1129 07:12:59.135239 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified" Nov 29 07:12:59 crc kubenswrapper[4943]: E1129 07:12:59.136784 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-keystone-listener-log,Image:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,Command:[/usr/bin/dumb-init],Args:[--single-child -- /usr/bin/tail -n+1 -F /var/log/barbican/barbican-keystone-listener.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd5h648h68h4h5bch54fhb4hc5h57fh676h5dh65ch696h86h6fh59chffh68fh679h66bhcbh549h64dh65hbch67hf9h5c5h669hb8h68ch7cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/barbican,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xprq4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-keystone-listener-5666c9f9fb-m5bqf_openstack(9e6b1af0-828e-4bc6-afc5-ae9728bf0f62): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:12:59 crc kubenswrapper[4943]: E1129 07:12:59.140162 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"barbican-keystone-listener-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"barbican-keystone-listener\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified\\\"\"]" pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" podUID="9e6b1af0-828e-4bc6-afc5-ae9728bf0f62" Nov 29 07:12:59 crc kubenswrapper[4943]: I1129 07:12:59.412762 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5648447994-lm7pg" podUID="444ebf94-f3eb-4f21-9a69-7730d465c3b6" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:12:59 crc kubenswrapper[4943]: I1129 07:12:59.489471 4943 generic.go:334] "Generic (PLEG): container finished" podID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerID="15e64e0735b30046273a8fdc4062d174f393e16f5d2556c587d60525229e691b" exitCode=0 Nov 29 07:12:59 crc kubenswrapper[4943]: I1129 07:12:59.489743 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" event={"ID":"9d6b7e59-a19e-40ed-92f7-777faf6041c1","Type":"ContainerDied","Data":"15e64e0735b30046273a8fdc4062d174f393e16f5d2556c587d60525229e691b"} Nov 29 07:12:59 crc kubenswrapper[4943]: E1129 07:12:59.491973 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"barbican-keystone-listener-log\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified\\\"\", failed to \"StartContainer\" for \"barbican-keystone-listener\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified\\\"\"]" pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" podUID="9e6b1af0-828e-4bc6-afc5-ae9728bf0f62" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.282706 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5648447994-lm7pg" podUID="444ebf94-f3eb-4f21-9a69-7730d465c3b6" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.282705 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5648447994-lm7pg" podUID="444ebf94-f3eb-4f21-9a69-7730d465c3b6" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.289554 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" Nov 29 07:13:00 crc kubenswrapper[4943]: E1129 07:13:00.333587 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified" Nov 29 07:13:00 crc kubenswrapper[4943]: E1129 07:13:00.333745 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-worker-log,Image:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,Command:[/usr/bin/dumb-init],Args:[--single-child -- /usr/bin/tail -n+1 -F /var/log/barbican/barbican-worker.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n56fh659h55ch559h65ch5d7h5c6h99hfbh547h54dh68ch586h57fh79hcbh5bh57bh5f6h68dh8bhd7h64fh5c9h5b8h587h5b8h549h589h65fh5fch548q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/barbican,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jlt2v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-worker-65f55968c5-wz8zv_openstack(09f8aed7-018b-4cc9-aead-9c93d1863e10): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:13:00 crc kubenswrapper[4943]: E1129 07:13:00.335704 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"barbican-worker-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"barbican-worker\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified\\\"\"]" pod="openstack/barbican-worker-65f55968c5-wz8zv" podUID="09f8aed7-018b-4cc9-aead-9c93d1863e10" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.386182 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-sb\") pod \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.386452 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-nb\") pod \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " Nov 29 
07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.386649 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-config\") pod \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.386682 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xks9w\" (UniqueName: \"kubernetes.io/projected/9d6b7e59-a19e-40ed-92f7-777faf6041c1-kube-api-access-xks9w\") pod \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.386814 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-dns-svc\") pod \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\" (UID: \"9d6b7e59-a19e-40ed-92f7-777faf6041c1\") " Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.395052 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d6b7e59-a19e-40ed-92f7-777faf6041c1-kube-api-access-xks9w" (OuterVolumeSpecName: "kube-api-access-xks9w") pod "9d6b7e59-a19e-40ed-92f7-777faf6041c1" (UID: "9d6b7e59-a19e-40ed-92f7-777faf6041c1"). InnerVolumeSpecName "kube-api-access-xks9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.421779 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5648447994-lm7pg" podUID="444ebf94-f3eb-4f21-9a69-7730d465c3b6" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.459148 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-config" (OuterVolumeSpecName: "config") pod "9d6b7e59-a19e-40ed-92f7-777faf6041c1" (UID: "9d6b7e59-a19e-40ed-92f7-777faf6041c1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.481251 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9d6b7e59-a19e-40ed-92f7-777faf6041c1" (UID: "9d6b7e59-a19e-40ed-92f7-777faf6041c1"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.490930 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.490983 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xks9w\" (UniqueName: \"kubernetes.io/projected/9d6b7e59-a19e-40ed-92f7-777faf6041c1-kube-api-access-xks9w\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.491001 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.511660 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9d6b7e59-a19e-40ed-92f7-777faf6041c1" (UID: "9d6b7e59-a19e-40ed-92f7-777faf6041c1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.518025 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.518387 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-745b9ddc8c-gq7tj" event={"ID":"9d6b7e59-a19e-40ed-92f7-777faf6041c1","Type":"ContainerDied","Data":"f9e619dca346ce16d896e63d5df090151301c2174701dd6413ee76b99fdbafde"} Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.518421 4943 scope.go:117] "RemoveContainer" containerID="15e64e0735b30046273a8fdc4062d174f393e16f5d2556c587d60525229e691b" Nov 29 07:13:00 crc kubenswrapper[4943]: E1129 07:13:00.520320 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"barbican-worker-log\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified\\\"\", failed to \"StartContainer\" for \"barbican-worker\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified\\\"\"]" pod="openstack/barbican-worker-65f55968c5-wz8zv" podUID="09f8aed7-018b-4cc9-aead-9c93d1863e10" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.606476 4943 scope.go:117] "RemoveContainer" containerID="62abad4c67a25a37e880dfca5cbd646918fa8e0e136f4d5d885265a02207d876" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.600395 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.607384 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9d6b7e59-a19e-40ed-92f7-777faf6041c1" (UID: "9d6b7e59-a19e-40ed-92f7-777faf6041c1"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.708258 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d6b7e59-a19e-40ed-92f7-777faf6041c1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.858937 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.859936 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.860124 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.860281 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.860394 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-745b9ddc8c-gq7tj"] Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.860480 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.860508 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5648447994-lm7pg" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.869746 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-745b9ddc8c-gq7tj"] Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.922861 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-56d48b4698-6bggx"] Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.923150 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" containerID="cri-o://cb2652f2915562864f87f8398dd1432f26081df65d29ed173d744fc9fd5739ed" gracePeriod=30 Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.923620 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" containerID="cri-o://fa49c532a40fbcc90c5bde1f0bc53291b92d285d35d7b79673c004b543f2fbe3" gracePeriod=30 Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.969844 4943 
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": EOF" Nov 29 07:13:00 crc kubenswrapper[4943]: I1129 07:13:00.969907 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": EOF" Nov 29 07:13:01 crc kubenswrapper[4943]: I1129 07:13:01.337912 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" path="/var/lib/kubelet/pods/9d6b7e59-a19e-40ed-92f7-777faf6041c1/volumes" Nov 29 07:13:01 crc kubenswrapper[4943]: I1129 07:13:01.532621 4943 generic.go:334] "Generic (PLEG): container finished" podID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerID="cb2652f2915562864f87f8398dd1432f26081df65d29ed173d744fc9fd5739ed" exitCode=143 Nov 29 07:13:01 crc kubenswrapper[4943]: I1129 07:13:01.532744 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56d48b4698-6bggx" event={"ID":"d98f919d-982c-41ed-92f0-4ad5668530ef","Type":"ContainerDied","Data":"cb2652f2915562864f87f8398dd1432f26081df65d29ed173d744fc9fd5739ed"} Nov 29 07:13:02 crc kubenswrapper[4943]: I1129 07:13:02.613790 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:13:02 crc kubenswrapper[4943]: I1129 07:13:02.613855 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:13:04 crc kubenswrapper[4943]: I1129 07:13:04.352789 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": read tcp 10.217.0.2:59436->10.217.0.157:9311: read: connection reset by peer" Nov 29 07:13:04 crc kubenswrapper[4943]: I1129 07:13:04.353942 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": read tcp 10.217.0.2:59450->10.217.0.157:9311: read: connection reset by peer" Nov 29 07:13:04 crc kubenswrapper[4943]: I1129 07:13:04.353966 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": dial tcp 10.217.0.157:9311: connect: connection refused" Nov 29 07:13:05 crc kubenswrapper[4943]: I1129 07:13:05.585115 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" 
containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": dial tcp 10.217.0.157:9311: connect: connection refused" Nov 29 07:13:05 crc kubenswrapper[4943]: I1129 07:13:05.590818 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56d48b4698-6bggx" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": dial tcp 10.217.0.157:9311: connect: connection refused" Nov 29 07:13:05 crc kubenswrapper[4943]: I1129 07:13:05.599334 4943 generic.go:334] "Generic (PLEG): container finished" podID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerID="fa49c532a40fbcc90c5bde1f0bc53291b92d285d35d7b79673c004b543f2fbe3" exitCode=0 Nov 29 07:13:05 crc kubenswrapper[4943]: I1129 07:13:05.599390 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56d48b4698-6bggx" event={"ID":"d98f919d-982c-41ed-92f0-4ad5668530ef","Type":"ContainerDied","Data":"fa49c532a40fbcc90c5bde1f0bc53291b92d285d35d7b79673c004b543f2fbe3"} Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.824395 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.937512 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d98f919d-982c-41ed-92f0-4ad5668530ef-logs\") pod \"d98f919d-982c-41ed-92f0-4ad5668530ef\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.937634 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-combined-ca-bundle\") pod \"d98f919d-982c-41ed-92f0-4ad5668530ef\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.937698 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmckl\" (UniqueName: \"kubernetes.io/projected/d98f919d-982c-41ed-92f0-4ad5668530ef-kube-api-access-jmckl\") pod \"d98f919d-982c-41ed-92f0-4ad5668530ef\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.937805 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data\") pod \"d98f919d-982c-41ed-92f0-4ad5668530ef\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.937900 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data-custom\") pod \"d98f919d-982c-41ed-92f0-4ad5668530ef\" (UID: \"d98f919d-982c-41ed-92f0-4ad5668530ef\") " Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.938095 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d98f919d-982c-41ed-92f0-4ad5668530ef-logs" (OuterVolumeSpecName: "logs") pod "d98f919d-982c-41ed-92f0-4ad5668530ef" (UID: "d98f919d-982c-41ed-92f0-4ad5668530ef"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.938319 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d98f919d-982c-41ed-92f0-4ad5668530ef-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.958380 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d98f919d-982c-41ed-92f0-4ad5668530ef-kube-api-access-jmckl" (OuterVolumeSpecName: "kube-api-access-jmckl") pod "d98f919d-982c-41ed-92f0-4ad5668530ef" (UID: "d98f919d-982c-41ed-92f0-4ad5668530ef"). InnerVolumeSpecName "kube-api-access-jmckl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.959443 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d98f919d-982c-41ed-92f0-4ad5668530ef" (UID: "d98f919d-982c-41ed-92f0-4ad5668530ef"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.968351 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d98f919d-982c-41ed-92f0-4ad5668530ef" (UID: "d98f919d-982c-41ed-92f0-4ad5668530ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:06 crc kubenswrapper[4943]: I1129 07:13:06.993226 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data" (OuterVolumeSpecName: "config-data") pod "d98f919d-982c-41ed-92f0-4ad5668530ef" (UID: "d98f919d-982c-41ed-92f0-4ad5668530ef"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.039673 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.039711 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmckl\" (UniqueName: \"kubernetes.io/projected/d98f919d-982c-41ed-92f0-4ad5668530ef-kube-api-access-jmckl\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.039724 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.039733 4943 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d98f919d-982c-41ed-92f0-4ad5668530ef-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.628884 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56d48b4698-6bggx" event={"ID":"d98f919d-982c-41ed-92f0-4ad5668530ef","Type":"ContainerDied","Data":"e0a47cdcaa4b6462685cbc89f59a20cf58a84924b160de83138b9f0463edfc86"} Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.629302 4943 scope.go:117] "RemoveContainer" containerID="fa49c532a40fbcc90c5bde1f0bc53291b92d285d35d7b79673c004b543f2fbe3" Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.628926 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56d48b4698-6bggx" Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.649461 4943 scope.go:117] "RemoveContainer" containerID="cb2652f2915562864f87f8398dd1432f26081df65d29ed173d744fc9fd5739ed" Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.658273 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-56d48b4698-6bggx"] Nov 29 07:13:07 crc kubenswrapper[4943]: I1129 07:13:07.672101 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-56d48b4698-6bggx"] Nov 29 07:13:09 crc kubenswrapper[4943]: I1129 07:13:09.232119 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:13:09 crc kubenswrapper[4943]: I1129 07:13:09.232845 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-654bfbd6d-pmg6n" Nov 29 07:13:09 crc kubenswrapper[4943]: I1129 07:13:09.337939 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" path="/var/lib/kubelet/pods/d98f919d-982c-41ed-92f0-4ad5668530ef/volumes" Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.172794 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.173330 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="ceilometer-central-agent" containerID="cri-o://9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0" gracePeriod=30 Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.173389 4943 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/ceilometer-0" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="sg-core" containerID="cri-o://d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f" gracePeriod=30 Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.173454 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="ceilometer-notification-agent" containerID="cri-o://de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be" gracePeriod=30 Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.173458 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="proxy-httpd" containerID="cri-o://1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864" gracePeriod=30 Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.657118 4943 generic.go:334] "Generic (PLEG): container finished" podID="37971419-7e31-42b7-a067-28a17f988a77" containerID="1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864" exitCode=0 Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.657154 4943 generic.go:334] "Generic (PLEG): container finished" podID="37971419-7e31-42b7-a067-28a17f988a77" containerID="d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f" exitCode=2 Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.657175 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerDied","Data":"1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864"} Nov 29 07:13:10 crc kubenswrapper[4943]: I1129 07:13:10.657200 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerDied","Data":"d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f"} Nov 29 07:13:10 crc kubenswrapper[4943]: E1129 07:13:10.737871 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37971419_7e31_42b7_a067_28a17f988a77.slice/crio-9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0.scope\": RecentStats: unable to find data in memory cache]" Nov 29 07:13:11 crc kubenswrapper[4943]: I1129 07:13:11.668217 4943 generic.go:334] "Generic (PLEG): container finished" podID="37971419-7e31-42b7-a067-28a17f988a77" containerID="9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0" exitCode=0 Nov 29 07:13:11 crc kubenswrapper[4943]: I1129 07:13:11.668503 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerDied","Data":"9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0"} Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.408955 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.438351 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bk992\" (UniqueName: \"kubernetes.io/projected/37971419-7e31-42b7-a067-28a17f988a77-kube-api-access-bk992\") pod \"37971419-7e31-42b7-a067-28a17f988a77\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.438507 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-sg-core-conf-yaml\") pod \"37971419-7e31-42b7-a067-28a17f988a77\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.438548 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-scripts\") pod \"37971419-7e31-42b7-a067-28a17f988a77\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.438596 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-config-data\") pod \"37971419-7e31-42b7-a067-28a17f988a77\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.438653 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-log-httpd\") pod \"37971419-7e31-42b7-a067-28a17f988a77\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.438674 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-combined-ca-bundle\") pod \"37971419-7e31-42b7-a067-28a17f988a77\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.438695 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-run-httpd\") pod \"37971419-7e31-42b7-a067-28a17f988a77\" (UID: \"37971419-7e31-42b7-a067-28a17f988a77\") " Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.439903 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "37971419-7e31-42b7-a067-28a17f988a77" (UID: "37971419-7e31-42b7-a067-28a17f988a77"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.440059 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "37971419-7e31-42b7-a067-28a17f988a77" (UID: "37971419-7e31-42b7-a067-28a17f988a77"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.455355 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-scripts" (OuterVolumeSpecName: "scripts") pod "37971419-7e31-42b7-a067-28a17f988a77" (UID: "37971419-7e31-42b7-a067-28a17f988a77"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.455408 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37971419-7e31-42b7-a067-28a17f988a77-kube-api-access-bk992" (OuterVolumeSpecName: "kube-api-access-bk992") pod "37971419-7e31-42b7-a067-28a17f988a77" (UID: "37971419-7e31-42b7-a067-28a17f988a77"). InnerVolumeSpecName "kube-api-access-bk992". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.485482 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "37971419-7e31-42b7-a067-28a17f988a77" (UID: "37971419-7e31-42b7-a067-28a17f988a77"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.541484 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.541586 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37971419-7e31-42b7-a067-28a17f988a77-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.541602 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bk992\" (UniqueName: \"kubernetes.io/projected/37971419-7e31-42b7-a067-28a17f988a77-kube-api-access-bk992\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.541621 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.541636 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.580196 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37971419-7e31-42b7-a067-28a17f988a77" (UID: "37971419-7e31-42b7-a067-28a17f988a77"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.628488 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-config-data" (OuterVolumeSpecName: "config-data") pod "37971419-7e31-42b7-a067-28a17f988a77" (UID: "37971419-7e31-42b7-a067-28a17f988a77"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.643734 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.643780 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37971419-7e31-42b7-a067-28a17f988a77-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.682409 4943 generic.go:334] "Generic (PLEG): container finished" podID="37971419-7e31-42b7-a067-28a17f988a77" containerID="de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be" exitCode=0 Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.682514 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerDied","Data":"de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be"} Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.682549 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37971419-7e31-42b7-a067-28a17f988a77","Type":"ContainerDied","Data":"19cf3dac373ac6f844bd35ff183914958ca1219b4f0ba6e7d556d9023f5dea42"} Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.682593 4943 scope.go:117] "RemoveContainer" containerID="1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.682738 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.688538 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-65f55968c5-wz8zv" event={"ID":"09f8aed7-018b-4cc9-aead-9c93d1863e10","Type":"ContainerStarted","Data":"8f9822f0b28c78b7b7dad7bdb41616a49037b1ea51a6a61df3ccd9c1d1a91dc8"} Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.688622 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-65f55968c5-wz8zv" event={"ID":"09f8aed7-018b-4cc9-aead-9c93d1863e10","Type":"ContainerStarted","Data":"f2dc650e5e85fa9727114f403b6e7445bba95ca6bc5b18476d944597f1efc763"} Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.699669 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" event={"ID":"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62","Type":"ContainerStarted","Data":"b548d8e214db6a4fce0b424b4e932d87a682cfe9eb1e134a76d8bfcf8f1c8cb0"} Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.712649 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-65f55968c5-wz8zv" podStartSLOduration=5.807278641 podStartE2EDuration="38.712616707s" podCreationTimestamp="2025-11-29 07:12:34 +0000 UTC" firstStartedPulling="2025-11-29 07:12:39.10962421 +0000 UTC m=+2334.039712963" lastFinishedPulling="2025-11-29 07:13:12.014962276 +0000 UTC m=+2366.945051029" observedRunningTime="2025-11-29 07:13:12.705492962 +0000 UTC m=+2367.635581725" watchObservedRunningTime="2025-11-29 07:13:12.712616707 +0000 UTC m=+2367.642705470" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.714717 4943 scope.go:117] "RemoveContainer" 
containerID="d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.743662 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.748532 4943 scope.go:117] "RemoveContainer" containerID="de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.760783 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.779999 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.780420 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="sg-core" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780445 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="sg-core" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.780460 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="ceilometer-central-agent" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780470 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="ceilometer-central-agent" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.780486 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerName="dnsmasq-dns" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780495 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerName="dnsmasq-dns" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.780506 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780514 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.780536 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="ceilometer-notification-agent" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780545 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="ceilometer-notification-agent" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.780558 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="proxy-httpd" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780620 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="proxy-httpd" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.780666 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780676 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.780692 
4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerName="init" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780699 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerName="init" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780903 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="ceilometer-notification-agent" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780923 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d6b7e59-a19e-40ed-92f7-777faf6041c1" containerName="dnsmasq-dns" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780941 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api-log" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780954 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d98f919d-982c-41ed-92f0-4ad5668530ef" containerName="barbican-api" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780967 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="ceilometer-central-agent" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780983 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="sg-core" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.780996 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="37971419-7e31-42b7-a067-28a17f988a77" containerName="proxy-httpd" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.782914 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.788746 4943 scope.go:117] "RemoveContainer" containerID="9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.789209 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.795471 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.843094 4943 scope.go:117] "RemoveContainer" containerID="1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.847057 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864\": container with ID starting with 1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864 not found: ID does not exist" containerID="1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.847155 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864"} err="failed to get container status \"1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864\": rpc error: code = NotFound desc = could not find container \"1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864\": container with ID starting with 1c49d04c74aea47d016c4fbe71d16bed01420eea88154b00f2bf538cb5ce5864 not found: ID does not exist" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.847194 4943 scope.go:117] "RemoveContainer" containerID="d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.847354 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.847640 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f\": container with ID starting with d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f not found: ID does not exist" containerID="d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.847672 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f"} err="failed to get container status \"d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f\": rpc error: code = NotFound desc = could not find container \"d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f\": container with ID starting with d52709d12c33b88a34b46e0c8ae576a5a0923a1095c340eefd9dacc2c5422a1f not found: ID does not exist" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.847696 4943 scope.go:117] "RemoveContainer" containerID="de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.847946 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be\": container with ID starting with de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be not found: ID does not exist" containerID="de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.847979 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be"} err="failed to get container status \"de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be\": rpc error: code = NotFound desc = could not find container \"de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be\": container with ID starting with de7d994af355d403dd7e577d0c1cbbd34ebf8f62ec92f5b60480e48d3fa575be not found: ID does not exist" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.848002 4943 scope.go:117] "RemoveContainer" containerID="9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0" Nov 29 07:13:12 crc kubenswrapper[4943]: E1129 07:13:12.848306 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0\": container with ID starting with 9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0 not found: ID does not exist" containerID="9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.848359 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0"} err="failed to get container status \"9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0\": rpc error: code = NotFound desc = could not find container \"9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0\": container with ID starting with 9b9ba90bad632185df7a01f7cd870259a270ae3c35ee9d409ec8667c026463e0 not found: ID does not exist" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.848680 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4shgn\" (UniqueName: \"kubernetes.io/projected/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-kube-api-access-4shgn\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.848917 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-config-data\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.849043 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-run-httpd\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.849141 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.849302 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-scripts\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.849470 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.849606 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-log-httpd\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.950836 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4shgn\" (UniqueName: \"kubernetes.io/projected/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-kube-api-access-4shgn\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.950898 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-config-data\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.950932 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-run-httpd\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.950960 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.951020 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-scripts\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.951078 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.951106 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-log-httpd\") 
pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.951700 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-log-httpd\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.952119 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-run-httpd\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.957803 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-config-data\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.958257 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.960024 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-scripts\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.960452 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:12 crc kubenswrapper[4943]: I1129 07:13:12.969969 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4shgn\" (UniqueName: \"kubernetes.io/projected/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-kube-api-access-4shgn\") pod \"ceilometer-0\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " pod="openstack/ceilometer-0" Nov 29 07:13:13 crc kubenswrapper[4943]: I1129 07:13:13.130873 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:13 crc kubenswrapper[4943]: I1129 07:13:13.374238 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37971419-7e31-42b7-a067-28a17f988a77" path="/var/lib/kubelet/pods/37971419-7e31-42b7-a067-28a17f988a77/volumes" Nov 29 07:13:13 crc kubenswrapper[4943]: I1129 07:13:13.514367 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:13 crc kubenswrapper[4943]: I1129 07:13:13.711991 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" event={"ID":"9e6b1af0-828e-4bc6-afc5-ae9728bf0f62","Type":"ContainerStarted","Data":"9bb1b498db09bda0a4362e099f344f8fcd524e9969403b2a8601e8c9e7a07109"} Nov 29 07:13:13 crc kubenswrapper[4943]: I1129 07:13:13.713758 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerStarted","Data":"899f3022fabc926e1242270256e2f6d788e48d5640dad4ebbfdc4726d0f71942"} Nov 29 07:13:13 crc kubenswrapper[4943]: I1129 07:13:13.729298 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5666c9f9fb-m5bqf" podStartSLOduration=6.831623784 podStartE2EDuration="39.72927278s" podCreationTimestamp="2025-11-29 07:12:34 +0000 UTC" firstStartedPulling="2025-11-29 07:12:39.114101691 +0000 UTC m=+2334.044190444" lastFinishedPulling="2025-11-29 07:13:12.011750677 +0000 UTC m=+2366.941839440" observedRunningTime="2025-11-29 07:13:13.727777123 +0000 UTC m=+2368.657865876" watchObservedRunningTime="2025-11-29 07:13:13.72927278 +0000 UTC m=+2368.659361533" Nov 29 07:13:14 crc kubenswrapper[4943]: I1129 07:13:14.697776 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 07:13:14 crc kubenswrapper[4943]: I1129 07:13:14.698783 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="849c4c54-e078-43b4-8137-afe141df50cf" containerName="kube-state-metrics" containerID="cri-o://2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db" gracePeriod=30 Nov 29 07:13:14 crc kubenswrapper[4943]: I1129 07:13:14.726759 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerStarted","Data":"46efedb8f8d55ca8ddd5a07e4b4a7b3f7fab6cbc7675a50510b3c5829c157719"} Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.182392 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.296738 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trmfj\" (UniqueName: \"kubernetes.io/projected/849c4c54-e078-43b4-8137-afe141df50cf-kube-api-access-trmfj\") pod \"849c4c54-e078-43b4-8137-afe141df50cf\" (UID: \"849c4c54-e078-43b4-8137-afe141df50cf\") " Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.301507 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/849c4c54-e078-43b4-8137-afe141df50cf-kube-api-access-trmfj" (OuterVolumeSpecName: "kube-api-access-trmfj") pod "849c4c54-e078-43b4-8137-afe141df50cf" (UID: "849c4c54-e078-43b4-8137-afe141df50cf"). InnerVolumeSpecName "kube-api-access-trmfj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.400062 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trmfj\" (UniqueName: \"kubernetes.io/projected/849c4c54-e078-43b4-8137-afe141df50cf-kube-api-access-trmfj\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.736287 4943 generic.go:334] "Generic (PLEG): container finished" podID="849c4c54-e078-43b4-8137-afe141df50cf" containerID="2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db" exitCode=2 Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.736376 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.736393 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"849c4c54-e078-43b4-8137-afe141df50cf","Type":"ContainerDied","Data":"2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db"} Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.737326 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"849c4c54-e078-43b4-8137-afe141df50cf","Type":"ContainerDied","Data":"8017a9c2c25e5b1cfef7922fe25d68917f096a9a6ea14331cda7e5233f5baf20"} Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.737348 4943 scope.go:117] "RemoveContainer" containerID="2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.839420 4943 scope.go:117] "RemoveContainer" containerID="2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db" Nov 29 07:13:15 crc kubenswrapper[4943]: E1129 07:13:15.846827 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db\": container with ID starting with 2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db not found: ID does not exist" containerID="2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.846881 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db"} err="failed to get container status \"2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db\": rpc error: code = NotFound desc = could not find container \"2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db\": container with ID starting with 2fe4bbd0a415b3e311c007e17f6c9d0640c5e30321e8c463d5645beb64ef02db not found: ID does not exist" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.859093 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.865247 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.872481 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 07:13:15 crc kubenswrapper[4943]: E1129 07:13:15.872894 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="849c4c54-e078-43b4-8137-afe141df50cf" containerName="kube-state-metrics" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.872916 4943 
state_mem.go:107] "Deleted CPUSet assignment" podUID="849c4c54-e078-43b4-8137-afe141df50cf" containerName="kube-state-metrics" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.873202 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="849c4c54-e078-43b4-8137-afe141df50cf" containerName="kube-state-metrics" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.873837 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.877018 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.877547 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.879858 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.908235 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.908889 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.909018 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mzk2\" (UniqueName: \"kubernetes.io/projected/192defe0-3319-458c-b5d6-220b1a641b88-kube-api-access-7mzk2\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:15 crc kubenswrapper[4943]: I1129 07:13:15.909090 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.011220 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.011320 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mzk2\" (UniqueName: \"kubernetes.io/projected/192defe0-3319-458c-b5d6-220b1a641b88-kube-api-access-7mzk2\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.011341 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.011423 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.015724 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.016051 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.021205 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/192defe0-3319-458c-b5d6-220b1a641b88-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.030384 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mzk2\" (UniqueName: \"kubernetes.io/projected/192defe0-3319-458c-b5d6-220b1a641b88-kube-api-access-7mzk2\") pod \"kube-state-metrics-0\" (UID: \"192defe0-3319-458c-b5d6-220b1a641b88\") " pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.146608 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.206139 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.752631 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerStarted","Data":"e56e0f54914762aac4e5e948473bcc5524bc72b9743392d6215d3e5cb7d4f675"} Nov 29 07:13:16 crc kubenswrapper[4943]: I1129 07:13:16.776117 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 07:13:17 crc kubenswrapper[4943]: I1129 07:13:17.344845 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="849c4c54-e078-43b4-8137-afe141df50cf" path="/var/lib/kubelet/pods/849c4c54-e078-43b4-8137-afe141df50cf/volumes" Nov 29 07:13:17 crc kubenswrapper[4943]: I1129 07:13:17.779370 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerStarted","Data":"8a308eedafbad24d635d5a4c124650b1e7fb096811235279f62a23b4bd5e91d8"} Nov 29 07:13:17 crc kubenswrapper[4943]: I1129 07:13:17.781362 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"192defe0-3319-458c-b5d6-220b1a641b88","Type":"ContainerStarted","Data":"91d3351ad949583b932a7db5f31175d0ab77668ea5242d205229d911c6f841b2"} Nov 29 07:13:17 crc kubenswrapper[4943]: I1129 07:13:17.781396 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"192defe0-3319-458c-b5d6-220b1a641b88","Type":"ContainerStarted","Data":"5540cbfc0c59503f44176456ac51bb82c67923136875de29af2b2749b8d283e7"} Nov 29 07:13:17 crc kubenswrapper[4943]: I1129 07:13:17.781551 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 29 07:13:17 crc kubenswrapper[4943]: I1129 07:13:17.803967 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.2414058470000002 podStartE2EDuration="2.803949126s" podCreationTimestamp="2025-11-29 07:13:15 +0000 UTC" firstStartedPulling="2025-11-29 07:13:16.786469513 +0000 UTC m=+2371.716558266" lastFinishedPulling="2025-11-29 07:13:17.349012792 +0000 UTC m=+2372.279101545" observedRunningTime="2025-11-29 07:13:17.794696567 +0000 UTC m=+2372.724785320" watchObservedRunningTime="2025-11-29 07:13:17.803949126 +0000 UTC m=+2372.734037879" Nov 29 07:13:24 crc kubenswrapper[4943]: I1129 07:13:24.859279 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerStarted","Data":"8fdb01928aa03202408e911cd798d43cc9c6cb67329ee2403914ad63138aab03"} Nov 29 07:13:24 crc kubenswrapper[4943]: I1129 07:13:24.859632 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="ceilometer-central-agent" containerID="cri-o://46efedb8f8d55ca8ddd5a07e4b4a7b3f7fab6cbc7675a50510b3c5829c157719" gracePeriod=30 Nov 29 07:13:24 crc kubenswrapper[4943]: I1129 07:13:24.859892 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="proxy-httpd" containerID="cri-o://8fdb01928aa03202408e911cd798d43cc9c6cb67329ee2403914ad63138aab03" gracePeriod=30 Nov 29 07:13:24 crc kubenswrapper[4943]: I1129 07:13:24.859904 4943 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:13:24 crc kubenswrapper[4943]: I1129 07:13:24.859935 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="sg-core" containerID="cri-o://8a308eedafbad24d635d5a4c124650b1e7fb096811235279f62a23b4bd5e91d8" gracePeriod=30 Nov 29 07:13:24 crc kubenswrapper[4943]: I1129 07:13:24.859976 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="ceilometer-notification-agent" containerID="cri-o://e56e0f54914762aac4e5e948473bcc5524bc72b9743392d6215d3e5cb7d4f675" gracePeriod=30 Nov 29 07:13:24 crc kubenswrapper[4943]: I1129 07:13:24.885931 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9094803759999999 podStartE2EDuration="12.885912924s" podCreationTimestamp="2025-11-29 07:13:12 +0000 UTC" firstStartedPulling="2025-11-29 07:13:13.523830991 +0000 UTC m=+2368.453919764" lastFinishedPulling="2025-11-29 07:13:24.500263559 +0000 UTC m=+2379.430352312" observedRunningTime="2025-11-29 07:13:24.883915824 +0000 UTC m=+2379.814004597" watchObservedRunningTime="2025-11-29 07:13:24.885912924 +0000 UTC m=+2379.816001677" Nov 29 07:13:25 crc kubenswrapper[4943]: I1129 07:13:25.873300 4943 generic.go:334] "Generic (PLEG): container finished" podID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerID="8fdb01928aa03202408e911cd798d43cc9c6cb67329ee2403914ad63138aab03" exitCode=0 Nov 29 07:13:25 crc kubenswrapper[4943]: I1129 07:13:25.873688 4943 generic.go:334] "Generic (PLEG): container finished" podID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerID="8a308eedafbad24d635d5a4c124650b1e7fb096811235279f62a23b4bd5e91d8" exitCode=2 Nov 29 07:13:25 crc kubenswrapper[4943]: I1129 07:13:25.873700 4943 generic.go:334] "Generic (PLEG): container finished" podID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerID="e56e0f54914762aac4e5e948473bcc5524bc72b9743392d6215d3e5cb7d4f675" exitCode=0 Nov 29 07:13:25 crc kubenswrapper[4943]: I1129 07:13:25.873709 4943 generic.go:334] "Generic (PLEG): container finished" podID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerID="46efedb8f8d55ca8ddd5a07e4b4a7b3f7fab6cbc7675a50510b3c5829c157719" exitCode=0 Nov 29 07:13:25 crc kubenswrapper[4943]: I1129 07:13:25.873371 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerDied","Data":"8fdb01928aa03202408e911cd798d43cc9c6cb67329ee2403914ad63138aab03"} Nov 29 07:13:25 crc kubenswrapper[4943]: I1129 07:13:25.873780 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerDied","Data":"8a308eedafbad24d635d5a4c124650b1e7fb096811235279f62a23b4bd5e91d8"} Nov 29 07:13:25 crc kubenswrapper[4943]: I1129 07:13:25.873795 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerDied","Data":"e56e0f54914762aac4e5e948473bcc5524bc72b9743392d6215d3e5cb7d4f675"} Nov 29 07:13:25 crc kubenswrapper[4943]: I1129 07:13:25.873807 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerDied","Data":"46efedb8f8d55ca8ddd5a07e4b4a7b3f7fab6cbc7675a50510b3c5829c157719"} Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.227123 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.300903 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.427162 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-scripts\") pod \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.427556 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-config-data\") pod \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.427658 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-log-httpd\") pod \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.427708 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-run-httpd\") pod \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.427802 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4shgn\" (UniqueName: \"kubernetes.io/projected/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-kube-api-access-4shgn\") pod \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.427841 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-combined-ca-bundle\") pod \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.427867 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-sg-core-conf-yaml\") pod \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\" (UID: \"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4\") " Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.429624 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" (UID: "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.432061 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" (UID: "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.451806 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-kube-api-access-4shgn" (OuterVolumeSpecName: "kube-api-access-4shgn") pod "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" (UID: "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4"). InnerVolumeSpecName "kube-api-access-4shgn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.466739 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-scripts" (OuterVolumeSpecName: "scripts") pod "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" (UID: "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.470824 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" (UID: "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.529588 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.529616 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.529627 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4shgn\" (UniqueName: \"kubernetes.io/projected/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-kube-api-access-4shgn\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.529638 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.529648 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.608727 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-config-data" (OuterVolumeSpecName: "config-data") pod "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" (UID: "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.632234 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.636378 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" (UID: "d1970fb3-c361-4a2e-ae4a-a1696d05a4a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.733945 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.882763 4943 generic.go:334] "Generic (PLEG): container finished" podID="a3301c2a-4575-4e54-a396-d31fb9c5e427" containerID="eccb4dbe92cf56c6596c85432ca0f1425762c93367c44aaa8cd07ab960accd45" exitCode=0 Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.882842 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-m6jf9" event={"ID":"a3301c2a-4575-4e54-a396-d31fb9c5e427","Type":"ContainerDied","Data":"eccb4dbe92cf56c6596c85432ca0f1425762c93367c44aaa8cd07ab960accd45"} Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.885705 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1970fb3-c361-4a2e-ae4a-a1696d05a4a4","Type":"ContainerDied","Data":"899f3022fabc926e1242270256e2f6d788e48d5640dad4ebbfdc4726d0f71942"} Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.885738 4943 scope.go:117] "RemoveContainer" containerID="8fdb01928aa03202408e911cd798d43cc9c6cb67329ee2403914ad63138aab03" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.885778 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.919376 4943 scope.go:117] "RemoveContainer" containerID="8a308eedafbad24d635d5a4c124650b1e7fb096811235279f62a23b4bd5e91d8" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.919392 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.936729 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.936971 4943 scope.go:117] "RemoveContainer" containerID="e56e0f54914762aac4e5e948473bcc5524bc72b9743392d6215d3e5cb7d4f675" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.946447 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:26 crc kubenswrapper[4943]: E1129 07:13:26.948875 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="ceilometer-notification-agent" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.948919 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="ceilometer-notification-agent" Nov 29 07:13:26 crc kubenswrapper[4943]: E1129 07:13:26.948948 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="ceilometer-central-agent" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.948955 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="ceilometer-central-agent" Nov 29 07:13:26 crc kubenswrapper[4943]: E1129 07:13:26.948970 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="sg-core" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.948985 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="sg-core" Nov 29 07:13:26 crc kubenswrapper[4943]: E1129 07:13:26.948998 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="proxy-httpd" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.949006 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="proxy-httpd" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.949271 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="ceilometer-central-agent" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.949297 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="ceilometer-notification-agent" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.949310 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="sg-core" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.949328 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" containerName="proxy-httpd" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.951149 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.955517 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.955780 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.955921 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.960551 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:26 crc kubenswrapper[4943]: I1129 07:13:26.965420 4943 scope.go:117] "RemoveContainer" containerID="46efedb8f8d55ca8ddd5a07e4b4a7b3f7fab6cbc7675a50510b3c5829c157719" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.038946 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-config-data\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.039019 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.039041 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-log-httpd\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.039195 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-scripts\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.039280 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.039358 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bq47\" (UniqueName: \"kubernetes.io/projected/2eece6d1-cde3-4cad-bfbe-23c766da7078-kube-api-access-2bq47\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.039410 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-run-httpd\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 
07:13:27.039462 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.141055 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.141186 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-config-data\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.141269 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.141302 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-log-httpd\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.141358 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-scripts\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.141407 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.141480 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bq47\" (UniqueName: \"kubernetes.io/projected/2eece6d1-cde3-4cad-bfbe-23c766da7078-kube-api-access-2bq47\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.141524 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-run-httpd\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.142256 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-log-httpd\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: 
I1129 07:13:27.142706 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-run-httpd\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.145847 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.146601 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.146981 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-config-data\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.148366 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-scripts\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.148368 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.157904 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bq47\" (UniqueName: \"kubernetes.io/projected/2eece6d1-cde3-4cad-bfbe-23c766da7078-kube-api-access-2bq47\") pod \"ceilometer-0\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.277256 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.338993 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1970fb3-c361-4a2e-ae4a-a1696d05a4a4" path="/var/lib/kubelet/pods/d1970fb3-c361-4a2e-ae4a-a1696d05a4a4/volumes" Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.754806 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:27 crc kubenswrapper[4943]: W1129 07:13:27.774997 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2eece6d1_cde3_4cad_bfbe_23c766da7078.slice/crio-324bf9d2d6b341632fc24571d275b42734bdf0386475132d4b307205872b0ca6 WatchSource:0}: Error finding container 324bf9d2d6b341632fc24571d275b42734bdf0386475132d4b307205872b0ca6: Status 404 returned error can't find the container with id 324bf9d2d6b341632fc24571d275b42734bdf0386475132d4b307205872b0ca6 Nov 29 07:13:27 crc kubenswrapper[4943]: I1129 07:13:27.899660 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerStarted","Data":"324bf9d2d6b341632fc24571d275b42734bdf0386475132d4b307205872b0ca6"} Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.211787 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.260944 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3301c2a-4575-4e54-a396-d31fb9c5e427-etc-machine-id\") pod \"a3301c2a-4575-4e54-a396-d31fb9c5e427\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.261033 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-combined-ca-bundle\") pod \"a3301c2a-4575-4e54-a396-d31fb9c5e427\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.261062 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-scripts\") pod \"a3301c2a-4575-4e54-a396-d31fb9c5e427\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.261110 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-config-data\") pod \"a3301c2a-4575-4e54-a396-d31fb9c5e427\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.261137 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ch68\" (UniqueName: \"kubernetes.io/projected/a3301c2a-4575-4e54-a396-d31fb9c5e427-kube-api-access-6ch68\") pod \"a3301c2a-4575-4e54-a396-d31fb9c5e427\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.261144 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a3301c2a-4575-4e54-a396-d31fb9c5e427-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a3301c2a-4575-4e54-a396-d31fb9c5e427" (UID: 
"a3301c2a-4575-4e54-a396-d31fb9c5e427"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.261162 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-db-sync-config-data\") pod \"a3301c2a-4575-4e54-a396-d31fb9c5e427\" (UID: \"a3301c2a-4575-4e54-a396-d31fb9c5e427\") " Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.262132 4943 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3301c2a-4575-4e54-a396-d31fb9c5e427-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.267141 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3301c2a-4575-4e54-a396-d31fb9c5e427-kube-api-access-6ch68" (OuterVolumeSpecName: "kube-api-access-6ch68") pod "a3301c2a-4575-4e54-a396-d31fb9c5e427" (UID: "a3301c2a-4575-4e54-a396-d31fb9c5e427"). InnerVolumeSpecName "kube-api-access-6ch68". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.269103 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a3301c2a-4575-4e54-a396-d31fb9c5e427" (UID: "a3301c2a-4575-4e54-a396-d31fb9c5e427"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.269447 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-scripts" (OuterVolumeSpecName: "scripts") pod "a3301c2a-4575-4e54-a396-d31fb9c5e427" (UID: "a3301c2a-4575-4e54-a396-d31fb9c5e427"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.287681 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3301c2a-4575-4e54-a396-d31fb9c5e427" (UID: "a3301c2a-4575-4e54-a396-d31fb9c5e427"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.311922 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-config-data" (OuterVolumeSpecName: "config-data") pod "a3301c2a-4575-4e54-a396-d31fb9c5e427" (UID: "a3301c2a-4575-4e54-a396-d31fb9c5e427"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.363709 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.363741 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.363750 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.363760 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ch68\" (UniqueName: \"kubernetes.io/projected/a3301c2a-4575-4e54-a396-d31fb9c5e427-kube-api-access-6ch68\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.363773 4943 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a3301c2a-4575-4e54-a396-d31fb9c5e427-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.908635 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-m6jf9" event={"ID":"a3301c2a-4575-4e54-a396-d31fb9c5e427","Type":"ContainerDied","Data":"326241d5dcc86bb2c5784c1f4a09490322d2d4372fa7311c65b3685e820ffb8b"} Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.908875 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="326241d5dcc86bb2c5784c1f4a09490322d2d4372fa7311c65b3685e820ffb8b" Nov 29 07:13:28 crc kubenswrapper[4943]: I1129 07:13:28.908846 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-m6jf9" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.221145 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:29 crc kubenswrapper[4943]: E1129 07:13:29.221524 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3301c2a-4575-4e54-a396-d31fb9c5e427" containerName="cinder-db-sync" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.221541 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3301c2a-4575-4e54-a396-d31fb9c5e427" containerName="cinder-db-sync" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.221792 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3301c2a-4575-4e54-a396-d31fb9c5e427" containerName="cinder-db-sync" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.222746 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.225406 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.225720 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.225799 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.229077 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-hs7mc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.244175 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.277242 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-scripts\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.277299 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxts8\" (UniqueName: \"kubernetes.io/projected/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-kube-api-access-qxts8\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.277322 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.277463 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.277552 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.277727 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.287806 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64b54fbc47-lgfqc"] Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.290091 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.315301 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64b54fbc47-lgfqc"] Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.380327 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.380398 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plqfv\" (UniqueName: \"kubernetes.io/projected/1738eb89-d861-49e6-aee5-ff918ce93bcb-kube-api-access-plqfv\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.380437 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-dns-svc\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.380490 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-sb\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.380545 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.382060 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-scripts\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.382140 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-config\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.382167 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxts8\" (UniqueName: \"kubernetes.io/projected/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-kube-api-access-qxts8\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.382196 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.382243 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-nb\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.382308 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.382444 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.386927 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.387332 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.389351 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-scripts\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.392235 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.419123 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxts8\" (UniqueName: \"kubernetes.io/projected/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-kube-api-access-qxts8\") pod \"cinder-scheduler-0\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.485376 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-config\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 
07:13:29.485450 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-nb\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.485510 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plqfv\" (UniqueName: \"kubernetes.io/projected/1738eb89-d861-49e6-aee5-ff918ce93bcb-kube-api-access-plqfv\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.485544 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-dns-svc\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.485606 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-sb\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.486415 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-nb\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.486415 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-config\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.486652 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-sb\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.486652 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-dns-svc\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.507155 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plqfv\" (UniqueName: \"kubernetes.io/projected/1738eb89-d861-49e6-aee5-ff918ce93bcb-kube-api-access-plqfv\") pod \"dnsmasq-dns-64b54fbc47-lgfqc\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.526505 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 29 
07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.531709 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.534916 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.537895 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.545164 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.588744 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data-custom\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.588800 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-scripts\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.588846 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.588899 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxhxv\" (UniqueName: \"kubernetes.io/projected/0108d626-2458-419a-a52b-fe532d7c0abe-kube-api-access-vxhxv\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.588964 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0108d626-2458-419a-a52b-fe532d7c0abe-logs\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.588999 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.589119 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0108d626-2458-419a-a52b-fe532d7c0abe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.618430 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.691662 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.691717 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxhxv\" (UniqueName: \"kubernetes.io/projected/0108d626-2458-419a-a52b-fe532d7c0abe-kube-api-access-vxhxv\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.691797 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0108d626-2458-419a-a52b-fe532d7c0abe-logs\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.691853 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.691883 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0108d626-2458-419a-a52b-fe532d7c0abe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.691931 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data-custom\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.691972 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-scripts\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.692535 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0108d626-2458-419a-a52b-fe532d7c0abe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.694441 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0108d626-2458-419a-a52b-fe532d7c0abe-logs\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.702814 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " 
pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.708053 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-scripts\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.722203 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.722464 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data-custom\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.726248 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxhxv\" (UniqueName: \"kubernetes.io/projected/0108d626-2458-419a-a52b-fe532d7c0abe-kube-api-access-vxhxv\") pod \"cinder-api-0\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " pod="openstack/cinder-api-0" Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.939624 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerStarted","Data":"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563"} Nov 29 07:13:29 crc kubenswrapper[4943]: I1129 07:13:29.954782 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.103689 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.247087 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64b54fbc47-lgfqc"] Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.357450 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.948329 4943 generic.go:334] "Generic (PLEG): container finished" podID="1738eb89-d861-49e6-aee5-ff918ce93bcb" containerID="9aa2da276e13f5f47c85c3432cbbbd7687776f53ea532f9fa1bc223e1b4d129f" exitCode=0 Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.948418 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" event={"ID":"1738eb89-d861-49e6-aee5-ff918ce93bcb","Type":"ContainerDied","Data":"9aa2da276e13f5f47c85c3432cbbbd7687776f53ea532f9fa1bc223e1b4d129f"} Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.948985 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" event={"ID":"1738eb89-d861-49e6-aee5-ff918ce93bcb","Type":"ContainerStarted","Data":"b3b10ba398137fdd4a3e946670ae52a311f5322aeea56abdc47034e918c93770"} Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.958756 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0108d626-2458-419a-a52b-fe532d7c0abe","Type":"ContainerStarted","Data":"2d7e67ee4826a142aba554357775e3f918130b3b0b3b4e528d7652c17ece00e1"} Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.962789 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ccaddac5-93ab-4df0-8b7e-a8195eca22c7","Type":"ContainerStarted","Data":"1ebf475b3364cc5c220f015bf5d18253542c1cf2d9faed8e2ad5d36580fab56f"} Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.992899 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerStarted","Data":"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829"} Nov 29 07:13:30 crc kubenswrapper[4943]: I1129 07:13:30.992951 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerStarted","Data":"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962"} Nov 29 07:13:31 crc kubenswrapper[4943]: I1129 07:13:31.613333 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 29 07:13:32 crc kubenswrapper[4943]: I1129 07:13:32.004738 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" event={"ID":"1738eb89-d861-49e6-aee5-ff918ce93bcb","Type":"ContainerStarted","Data":"a39eb700b49b3db8da7aebf54a99c82cc728b4b7f759c33261a89dcb4b6d2c3b"} Nov 29 07:13:32 crc kubenswrapper[4943]: I1129 07:13:32.004833 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:32 crc kubenswrapper[4943]: I1129 07:13:32.007755 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0108d626-2458-419a-a52b-fe532d7c0abe","Type":"ContainerStarted","Data":"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0"} Nov 29 
07:13:32 crc kubenswrapper[4943]: I1129 07:13:32.023452 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" podStartSLOduration=3.023433343 podStartE2EDuration="3.023433343s" podCreationTimestamp="2025-11-29 07:13:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:13:32.023065573 +0000 UTC m=+2386.953154336" watchObservedRunningTime="2025-11-29 07:13:32.023433343 +0000 UTC m=+2386.953522096" Nov 29 07:13:32 crc kubenswrapper[4943]: I1129 07:13:32.613580 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:13:32 crc kubenswrapper[4943]: I1129 07:13:32.613640 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.025115 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ccaddac5-93ab-4df0-8b7e-a8195eca22c7","Type":"ContainerStarted","Data":"8d28fa321e107de29e6ea9254180072cfd23149aca574b0e92d666f60fb19d32"} Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.025457 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ccaddac5-93ab-4df0-8b7e-a8195eca22c7","Type":"ContainerStarted","Data":"48332a28964ad22e2231ac04b18d62d6743b9133533b12003a634f9db525b9ff"} Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.035422 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerStarted","Data":"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1"} Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.036354 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.046302 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.049787293 podStartE2EDuration="4.046288007s" podCreationTimestamp="2025-11-29 07:13:29 +0000 UTC" firstStartedPulling="2025-11-29 07:13:30.174148119 +0000 UTC m=+2385.104236872" lastFinishedPulling="2025-11-29 07:13:31.170648833 +0000 UTC m=+2386.100737586" observedRunningTime="2025-11-29 07:13:33.045468786 +0000 UTC m=+2387.975557539" watchObservedRunningTime="2025-11-29 07:13:33.046288007 +0000 UTC m=+2387.976376760" Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.068009 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0108d626-2458-419a-a52b-fe532d7c0abe","Type":"ContainerStarted","Data":"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c"} Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.068434 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="0108d626-2458-419a-a52b-fe532d7c0abe" containerName="cinder-api-log" 
containerID="cri-o://9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0" gracePeriod=30 Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.068601 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="0108d626-2458-419a-a52b-fe532d7c0abe" containerName="cinder-api" containerID="cri-o://98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c" gracePeriod=30 Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.084760 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.950408659 podStartE2EDuration="7.084738846s" podCreationTimestamp="2025-11-29 07:13:26 +0000 UTC" firstStartedPulling="2025-11-29 07:13:27.7770342 +0000 UTC m=+2382.707122953" lastFinishedPulling="2025-11-29 07:13:31.911364387 +0000 UTC m=+2386.841453140" observedRunningTime="2025-11-29 07:13:33.078036131 +0000 UTC m=+2388.008124884" watchObservedRunningTime="2025-11-29 07:13:33.084738846 +0000 UTC m=+2388.014827599" Nov 29 07:13:33 crc kubenswrapper[4943]: I1129 07:13:33.140923 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.140905272 podStartE2EDuration="4.140905272s" podCreationTimestamp="2025-11-29 07:13:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:13:33.139573119 +0000 UTC m=+2388.069661872" watchObservedRunningTime="2025-11-29 07:13:33.140905272 +0000 UTC m=+2388.070994025" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.040076 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.084046 4943 generic.go:334] "Generic (PLEG): container finished" podID="0108d626-2458-419a-a52b-fe532d7c0abe" containerID="98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c" exitCode=0 Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.084079 4943 generic.go:334] "Generic (PLEG): container finished" podID="0108d626-2458-419a-a52b-fe532d7c0abe" containerID="9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0" exitCode=143 Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.084253 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.084269 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0108d626-2458-419a-a52b-fe532d7c0abe","Type":"ContainerDied","Data":"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c"} Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.084319 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0108d626-2458-419a-a52b-fe532d7c0abe","Type":"ContainerDied","Data":"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0"} Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.084333 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0108d626-2458-419a-a52b-fe532d7c0abe","Type":"ContainerDied","Data":"2d7e67ee4826a142aba554357775e3f918130b3b0b3b4e528d7652c17ece00e1"} Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.084353 4943 scope.go:117] "RemoveContainer" containerID="98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.103812 4943 scope.go:117] "RemoveContainer" containerID="9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116269 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0108d626-2458-419a-a52b-fe532d7c0abe-etc-machine-id\") pod \"0108d626-2458-419a-a52b-fe532d7c0abe\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116323 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data\") pod \"0108d626-2458-419a-a52b-fe532d7c0abe\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116349 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-scripts\") pod \"0108d626-2458-419a-a52b-fe532d7c0abe\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116373 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0108d626-2458-419a-a52b-fe532d7c0abe-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0108d626-2458-419a-a52b-fe532d7c0abe" (UID: "0108d626-2458-419a-a52b-fe532d7c0abe"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116411 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-combined-ca-bundle\") pod \"0108d626-2458-419a-a52b-fe532d7c0abe\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116445 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data-custom\") pod \"0108d626-2458-419a-a52b-fe532d7c0abe\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116544 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0108d626-2458-419a-a52b-fe532d7c0abe-logs\") pod \"0108d626-2458-419a-a52b-fe532d7c0abe\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116604 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxhxv\" (UniqueName: \"kubernetes.io/projected/0108d626-2458-419a-a52b-fe532d7c0abe-kube-api-access-vxhxv\") pod \"0108d626-2458-419a-a52b-fe532d7c0abe\" (UID: \"0108d626-2458-419a-a52b-fe532d7c0abe\") " Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.116911 4943 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0108d626-2458-419a-a52b-fe532d7c0abe-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.118325 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0108d626-2458-419a-a52b-fe532d7c0abe-logs" (OuterVolumeSpecName: "logs") pod "0108d626-2458-419a-a52b-fe532d7c0abe" (UID: "0108d626-2458-419a-a52b-fe532d7c0abe"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.124534 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-scripts" (OuterVolumeSpecName: "scripts") pod "0108d626-2458-419a-a52b-fe532d7c0abe" (UID: "0108d626-2458-419a-a52b-fe532d7c0abe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.124685 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0108d626-2458-419a-a52b-fe532d7c0abe-kube-api-access-vxhxv" (OuterVolumeSpecName: "kube-api-access-vxhxv") pod "0108d626-2458-419a-a52b-fe532d7c0abe" (UID: "0108d626-2458-419a-a52b-fe532d7c0abe"). InnerVolumeSpecName "kube-api-access-vxhxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.127936 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0108d626-2458-419a-a52b-fe532d7c0abe" (UID: "0108d626-2458-419a-a52b-fe532d7c0abe"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.140062 4943 scope.go:117] "RemoveContainer" containerID="98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c" Nov 29 07:13:34 crc kubenswrapper[4943]: E1129 07:13:34.143612 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c\": container with ID starting with 98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c not found: ID does not exist" containerID="98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.143659 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c"} err="failed to get container status \"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c\": rpc error: code = NotFound desc = could not find container \"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c\": container with ID starting with 98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c not found: ID does not exist" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.143686 4943 scope.go:117] "RemoveContainer" containerID="9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0" Nov 29 07:13:34 crc kubenswrapper[4943]: E1129 07:13:34.148488 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0\": container with ID starting with 9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0 not found: ID does not exist" containerID="9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.148539 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0"} err="failed to get container status \"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0\": rpc error: code = NotFound desc = could not find container \"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0\": container with ID starting with 9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0 not found: ID does not exist" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.148629 4943 scope.go:117] "RemoveContainer" containerID="98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.149853 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c"} err="failed to get container status \"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c\": rpc error: code = NotFound desc = could not find container \"98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c\": container with ID starting with 98c49fc1d1032af59e4305c9c8db3be869a59e952a019d349d53f5b3edba195c not found: ID does not exist" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.149962 4943 scope.go:117] "RemoveContainer" containerID="9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.154950 4943 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0"} err="failed to get container status \"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0\": rpc error: code = NotFound desc = could not find container \"9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0\": container with ID starting with 9b334649fea05189d9e071bce2320ca720582b67bf566fa6d0e9e8e82614d9c0 not found: ID does not exist" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.171610 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0108d626-2458-419a-a52b-fe532d7c0abe" (UID: "0108d626-2458-419a-a52b-fe532d7c0abe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.173791 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data" (OuterVolumeSpecName: "config-data") pod "0108d626-2458-419a-a52b-fe532d7c0abe" (UID: "0108d626-2458-419a-a52b-fe532d7c0abe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.219086 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0108d626-2458-419a-a52b-fe532d7c0abe-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.219120 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxhxv\" (UniqueName: \"kubernetes.io/projected/0108d626-2458-419a-a52b-fe532d7c0abe-kube-api-access-vxhxv\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.219131 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.219158 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.219169 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.219179 4943 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0108d626-2458-419a-a52b-fe532d7c0abe-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.475332 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.482450 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.508806 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 29 07:13:34 crc kubenswrapper[4943]: E1129 07:13:34.509219 4943 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0108d626-2458-419a-a52b-fe532d7c0abe" containerName="cinder-api-log" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.509236 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0108d626-2458-419a-a52b-fe532d7c0abe" containerName="cinder-api-log" Nov 29 07:13:34 crc kubenswrapper[4943]: E1129 07:13:34.509253 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0108d626-2458-419a-a52b-fe532d7c0abe" containerName="cinder-api" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.509259 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0108d626-2458-419a-a52b-fe532d7c0abe" containerName="cinder-api" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.509450 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0108d626-2458-419a-a52b-fe532d7c0abe" containerName="cinder-api-log" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.509471 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0108d626-2458-419a-a52b-fe532d7c0abe" containerName="cinder-api" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.510364 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.512879 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.513216 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.513432 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.516365 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.546251 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.630996 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.631055 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-config-data\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.631322 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-logs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.631419 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc 
kubenswrapper[4943]: I1129 07:13:34.631454 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-scripts\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.631621 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-config-data-custom\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.631866 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c56z5\" (UniqueName: \"kubernetes.io/projected/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-kube-api-access-c56z5\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.631960 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.632011 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.733837 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-logs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.733909 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.733932 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-scripts\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.733991 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.734007 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-config-data-custom\") pod \"cinder-api-0\" (UID: 
\"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.734052 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c56z5\" (UniqueName: \"kubernetes.io/projected/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-kube-api-access-c56z5\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.734109 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.734140 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.734191 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.734214 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-config-data\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.734320 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-logs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.738361 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-config-data-custom\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.739116 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.739223 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-scripts\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.741196 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: 
\"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.754479 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.756616 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c56z5\" (UniqueName: \"kubernetes.io/projected/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-kube-api-access-c56z5\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.756912 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c-config-data\") pod \"cinder-api-0\" (UID: \"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c\") " pod="openstack/cinder-api-0" Nov 29 07:13:34 crc kubenswrapper[4943]: I1129 07:13:34.826788 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 07:13:35 crc kubenswrapper[4943]: I1129 07:13:35.310090 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 29 07:13:35 crc kubenswrapper[4943]: W1129 07:13:35.318037 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8943a5ae_79c8_4ea7_a4fb_eba8cb993f1c.slice/crio-7ece22bf381d631b818c066e364edf490cede5931fad0c0042bbfb77ccaf6df5 WatchSource:0}: Error finding container 7ece22bf381d631b818c066e364edf490cede5931fad0c0042bbfb77ccaf6df5: Status 404 returned error can't find the container with id 7ece22bf381d631b818c066e364edf490cede5931fad0c0042bbfb77ccaf6df5 Nov 29 07:13:35 crc kubenswrapper[4943]: I1129 07:13:35.336726 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0108d626-2458-419a-a52b-fe532d7c0abe" path="/var/lib/kubelet/pods/0108d626-2458-419a-a52b-fe532d7c0abe/volumes" Nov 29 07:13:36 crc kubenswrapper[4943]: I1129 07:13:36.128198 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c","Type":"ContainerStarted","Data":"377f0cf1d72b836075aeb2f08e67a99777b5b4130a01e14800f7f2dca4add2b4"} Nov 29 07:13:36 crc kubenswrapper[4943]: I1129 07:13:36.128460 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c","Type":"ContainerStarted","Data":"7ece22bf381d631b818c066e364edf490cede5931fad0c0042bbfb77ccaf6df5"} Nov 29 07:13:37 crc kubenswrapper[4943]: I1129 07:13:37.142419 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c","Type":"ContainerStarted","Data":"0d997a5697c00538699f6e34ce2d0409d86bb2b7bf7a8ccbc382c308ed7291f7"} Nov 29 07:13:37 crc kubenswrapper[4943]: I1129 07:13:37.143038 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 29 07:13:37 crc kubenswrapper[4943]: I1129 07:13:37.167357 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.1673319859999998 podStartE2EDuration="3.167331986s" 
podCreationTimestamp="2025-11-29 07:13:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:13:37.163354759 +0000 UTC m=+2392.093443542" watchObservedRunningTime="2025-11-29 07:13:37.167331986 +0000 UTC m=+2392.097420739" Nov 29 07:13:37 crc kubenswrapper[4943]: I1129 07:13:37.892889 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:37 crc kubenswrapper[4943]: I1129 07:13:37.893133 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="ceilometer-central-agent" containerID="cri-o://b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563" gracePeriod=30 Nov 29 07:13:37 crc kubenswrapper[4943]: I1129 07:13:37.893189 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="proxy-httpd" containerID="cri-o://bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1" gracePeriod=30 Nov 29 07:13:37 crc kubenswrapper[4943]: I1129 07:13:37.893239 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="sg-core" containerID="cri-o://d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829" gracePeriod=30 Nov 29 07:13:37 crc kubenswrapper[4943]: I1129 07:13:37.893382 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="ceilometer-notification-agent" containerID="cri-o://e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962" gracePeriod=30 Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.153703 4943 generic.go:334] "Generic (PLEG): container finished" podID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerID="bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1" exitCode=0 Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.154268 4943 generic.go:334] "Generic (PLEG): container finished" podID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerID="d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829" exitCode=2 Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.154100 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerDied","Data":"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1"} Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.154554 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerDied","Data":"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829"} Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.629436 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.738348 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-sg-core-conf-yaml\") pod \"2eece6d1-cde3-4cad-bfbe-23c766da7078\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.738417 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-config-data\") pod \"2eece6d1-cde3-4cad-bfbe-23c766da7078\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.738489 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-scripts\") pod \"2eece6d1-cde3-4cad-bfbe-23c766da7078\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.738531 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-combined-ca-bundle\") pod \"2eece6d1-cde3-4cad-bfbe-23c766da7078\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.738581 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-run-httpd\") pod \"2eece6d1-cde3-4cad-bfbe-23c766da7078\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.738639 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-log-httpd\") pod \"2eece6d1-cde3-4cad-bfbe-23c766da7078\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.738698 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-ceilometer-tls-certs\") pod \"2eece6d1-cde3-4cad-bfbe-23c766da7078\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.738750 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bq47\" (UniqueName: \"kubernetes.io/projected/2eece6d1-cde3-4cad-bfbe-23c766da7078-kube-api-access-2bq47\") pod \"2eece6d1-cde3-4cad-bfbe-23c766da7078\" (UID: \"2eece6d1-cde3-4cad-bfbe-23c766da7078\") " Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.740629 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2eece6d1-cde3-4cad-bfbe-23c766da7078" (UID: "2eece6d1-cde3-4cad-bfbe-23c766da7078"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.740743 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2eece6d1-cde3-4cad-bfbe-23c766da7078" (UID: "2eece6d1-cde3-4cad-bfbe-23c766da7078"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.745058 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-scripts" (OuterVolumeSpecName: "scripts") pod "2eece6d1-cde3-4cad-bfbe-23c766da7078" (UID: "2eece6d1-cde3-4cad-bfbe-23c766da7078"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.746444 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2eece6d1-cde3-4cad-bfbe-23c766da7078-kube-api-access-2bq47" (OuterVolumeSpecName: "kube-api-access-2bq47") pod "2eece6d1-cde3-4cad-bfbe-23c766da7078" (UID: "2eece6d1-cde3-4cad-bfbe-23c766da7078"). InnerVolumeSpecName "kube-api-access-2bq47". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.765067 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2eece6d1-cde3-4cad-bfbe-23c766da7078" (UID: "2eece6d1-cde3-4cad-bfbe-23c766da7078"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.783641 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "2eece6d1-cde3-4cad-bfbe-23c766da7078" (UID: "2eece6d1-cde3-4cad-bfbe-23c766da7078"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.802219 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2eece6d1-cde3-4cad-bfbe-23c766da7078" (UID: "2eece6d1-cde3-4cad-bfbe-23c766da7078"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.821279 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-config-data" (OuterVolumeSpecName: "config-data") pod "2eece6d1-cde3-4cad-bfbe-23c766da7078" (UID: "2eece6d1-cde3-4cad-bfbe-23c766da7078"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.840922 4943 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.840957 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bq47\" (UniqueName: \"kubernetes.io/projected/2eece6d1-cde3-4cad-bfbe-23c766da7078-kube-api-access-2bq47\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.840974 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.840985 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.840997 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.841009 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eece6d1-cde3-4cad-bfbe-23c766da7078-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.841020 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:38 crc kubenswrapper[4943]: I1129 07:13:38.841030 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2eece6d1-cde3-4cad-bfbe-23c766da7078-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.164167 4943 generic.go:334] "Generic (PLEG): container finished" podID="6e6b4461-55fa-4092-b3c4-bc414ea16f70" containerID="f4c31a8fb5196326cd984ff41e04d48ad5a2aebc72c4a42ca33a086806c151fa" exitCode=0 Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.164232 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-t85kr" event={"ID":"6e6b4461-55fa-4092-b3c4-bc414ea16f70","Type":"ContainerDied","Data":"f4c31a8fb5196326cd984ff41e04d48ad5a2aebc72c4a42ca33a086806c151fa"} Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.167889 4943 generic.go:334] "Generic (PLEG): container finished" podID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerID="e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962" exitCode=0 Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.167943 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.167949 4943 generic.go:334] "Generic (PLEG): container finished" podID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerID="b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563" exitCode=0 Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.167987 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerDied","Data":"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962"} Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.168028 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerDied","Data":"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563"} Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.168048 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2eece6d1-cde3-4cad-bfbe-23c766da7078","Type":"ContainerDied","Data":"324bf9d2d6b341632fc24571d275b42734bdf0386475132d4b307205872b0ca6"} Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.168073 4943 scope.go:117] "RemoveContainer" containerID="bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.193162 4943 scope.go:117] "RemoveContainer" containerID="d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.220833 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.229300 4943 scope.go:117] "RemoveContainer" containerID="e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.235140 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.247800 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:39 crc kubenswrapper[4943]: E1129 07:13:39.248367 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="sg-core" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.248389 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="sg-core" Nov 29 07:13:39 crc kubenswrapper[4943]: E1129 07:13:39.248408 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="ceilometer-notification-agent" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.248415 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="ceilometer-notification-agent" Nov 29 07:13:39 crc kubenswrapper[4943]: E1129 07:13:39.248425 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="proxy-httpd" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.248433 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="proxy-httpd" Nov 29 07:13:39 crc kubenswrapper[4943]: E1129 07:13:39.248443 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" 
containerName="ceilometer-central-agent" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.248451 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="ceilometer-central-agent" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.248682 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="ceilometer-notification-agent" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.248700 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="ceilometer-central-agent" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.248715 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="proxy-httpd" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.248729 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" containerName="sg-core" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.250983 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.261895 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.262048 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.262101 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.263844 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.276019 4943 scope.go:117] "RemoveContainer" containerID="b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.301882 4943 scope.go:117] "RemoveContainer" containerID="bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1" Nov 29 07:13:39 crc kubenswrapper[4943]: E1129 07:13:39.302508 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1\": container with ID starting with bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1 not found: ID does not exist" containerID="bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.302617 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1"} err="failed to get container status \"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1\": rpc error: code = NotFound desc = could not find container \"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1\": container with ID starting with bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1 not found: ID does not exist" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.302660 4943 scope.go:117] "RemoveContainer" containerID="d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829" Nov 29 07:13:39 crc kubenswrapper[4943]: E1129 07:13:39.303137 4943 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829\": container with ID starting with d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829 not found: ID does not exist" containerID="d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.303369 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829"} err="failed to get container status \"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829\": rpc error: code = NotFound desc = could not find container \"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829\": container with ID starting with d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829 not found: ID does not exist" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.303493 4943 scope.go:117] "RemoveContainer" containerID="e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962" Nov 29 07:13:39 crc kubenswrapper[4943]: E1129 07:13:39.303935 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962\": container with ID starting with e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962 not found: ID does not exist" containerID="e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.304026 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962"} err="failed to get container status \"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962\": rpc error: code = NotFound desc = could not find container \"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962\": container with ID starting with e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962 not found: ID does not exist" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.304046 4943 scope.go:117] "RemoveContainer" containerID="b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563" Nov 29 07:13:39 crc kubenswrapper[4943]: E1129 07:13:39.304312 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563\": container with ID starting with b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563 not found: ID does not exist" containerID="b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.304351 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563"} err="failed to get container status \"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563\": rpc error: code = NotFound desc = could not find container \"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563\": container with ID starting with b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563 not found: ID does not exist" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.304377 4943 scope.go:117] 
"RemoveContainer" containerID="bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.304755 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1"} err="failed to get container status \"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1\": rpc error: code = NotFound desc = could not find container \"bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1\": container with ID starting with bd3fab8fab2e9deb25fedcfbb168dafaa8bab9b3930510f2e6879aab74aa27b1 not found: ID does not exist" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.304910 4943 scope.go:117] "RemoveContainer" containerID="d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.305256 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829"} err="failed to get container status \"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829\": rpc error: code = NotFound desc = could not find container \"d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829\": container with ID starting with d9b8b7f09fd4fa39ab89524a9f9ae19de6ac0aaeb8ff105f6662fba9d9504829 not found: ID does not exist" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.305282 4943 scope.go:117] "RemoveContainer" containerID="e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.305516 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962"} err="failed to get container status \"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962\": rpc error: code = NotFound desc = could not find container \"e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962\": container with ID starting with e51fc9be0af68e3d0c5b3c83e8642e417c189c1e494c1b1bd54b0b96a5120962 not found: ID does not exist" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.305644 4943 scope.go:117] "RemoveContainer" containerID="b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.306004 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563"} err="failed to get container status \"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563\": rpc error: code = NotFound desc = could not find container \"b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563\": container with ID starting with b797b032909550346f0367c97f940afbc20a860eff469a4c8a6fb2cc795e2563 not found: ID does not exist" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.337018 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2eece6d1-cde3-4cad-bfbe-23c766da7078" path="/var/lib/kubelet/pods/2eece6d1-cde3-4cad-bfbe-23c766da7078/volumes" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.349152 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-scripts\") pod \"ceilometer-0\" (UID: 
\"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.349182 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.349204 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.349266 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-config-data\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.349440 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n56nv\" (UniqueName: \"kubernetes.io/projected/3099f8bd-f2f1-4685-a4fd-73311457eac7-kube-api-access-n56nv\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.349813 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-log-httpd\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.349858 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-run-httpd\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.349905 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.451707 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-scripts\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.451772 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.451800 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.451912 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-config-data\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.452611 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n56nv\" (UniqueName: \"kubernetes.io/projected/3099f8bd-f2f1-4685-a4fd-73311457eac7-kube-api-access-n56nv\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.452722 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-log-httpd\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.452757 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-run-httpd\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.452803 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.453258 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-run-httpd\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.453746 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-log-httpd\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.455971 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.457658 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-scripts\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.458226 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-config-data\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.458508 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.461543 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.473860 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n56nv\" (UniqueName: \"kubernetes.io/projected/3099f8bd-f2f1-4685-a4fd-73311457eac7-kube-api-access-n56nv\") pod \"ceilometer-0\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.577885 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.621435 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.690802 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c55bf9497-p8xzj"] Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.691056 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" podUID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerName="dnsmasq-dns" containerID="cri-o://a363585a07ce3b1222b542c528d329a0bfa56a70d09a494b5ec4c2f8797488fb" gracePeriod=10 Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.848251 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 29 07:13:39 crc kubenswrapper[4943]: I1129 07:13:39.921651 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.104346 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:40 crc kubenswrapper[4943]: W1129 07:13:40.117231 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3099f8bd_f2f1_4685_a4fd_73311457eac7.slice/crio-2c4ba955a824113709ce10bc3f65ebc39dbd27bf7aa52542a427122dcd90088d WatchSource:0}: Error finding container 2c4ba955a824113709ce10bc3f65ebc39dbd27bf7aa52542a427122dcd90088d: Status 404 returned error can't find the container with id 2c4ba955a824113709ce10bc3f65ebc39dbd27bf7aa52542a427122dcd90088d Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.182087 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerStarted","Data":"2c4ba955a824113709ce10bc3f65ebc39dbd27bf7aa52542a427122dcd90088d"} Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.185263 4943 generic.go:334] "Generic (PLEG): container finished" 
podID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerID="a363585a07ce3b1222b542c528d329a0bfa56a70d09a494b5ec4c2f8797488fb" exitCode=0 Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.185356 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" event={"ID":"b7250ef3-7d67-4e63-bd25-ab511d058ee7","Type":"ContainerDied","Data":"a363585a07ce3b1222b542c528d329a0bfa56a70d09a494b5ec4c2f8797488fb"} Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.185557 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerName="cinder-scheduler" containerID="cri-o://48332a28964ad22e2231ac04b18d62d6743b9133533b12003a634f9db525b9ff" gracePeriod=30 Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.185678 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerName="probe" containerID="cri-o://8d28fa321e107de29e6ea9254180072cfd23149aca574b0e92d666f60fb19d32" gracePeriod=30 Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.374931 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" podUID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.156:5353: connect: connection refused" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.718357 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.882596 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-sb\") pod \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.882653 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-config\") pod \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.882814 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4chs\" (UniqueName: \"kubernetes.io/projected/b7250ef3-7d67-4e63-bd25-ab511d058ee7-kube-api-access-k4chs\") pod \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.882862 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-dns-svc\") pod \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.882914 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-nb\") pod \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\" (UID: \"b7250ef3-7d67-4e63-bd25-ab511d058ee7\") " Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.888952 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/b7250ef3-7d67-4e63-bd25-ab511d058ee7-kube-api-access-k4chs" (OuterVolumeSpecName: "kube-api-access-k4chs") pod "b7250ef3-7d67-4e63-bd25-ab511d058ee7" (UID: "b7250ef3-7d67-4e63-bd25-ab511d058ee7"). InnerVolumeSpecName "kube-api-access-k4chs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.936397 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-config" (OuterVolumeSpecName: "config") pod "b7250ef3-7d67-4e63-bd25-ab511d058ee7" (UID: "b7250ef3-7d67-4e63-bd25-ab511d058ee7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.942986 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b7250ef3-7d67-4e63-bd25-ab511d058ee7" (UID: "b7250ef3-7d67-4e63-bd25-ab511d058ee7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.953514 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b7250ef3-7d67-4e63-bd25-ab511d058ee7" (UID: "b7250ef3-7d67-4e63-bd25-ab511d058ee7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.966801 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b7250ef3-7d67-4e63-bd25-ab511d058ee7" (UID: "b7250ef3-7d67-4e63-bd25-ab511d058ee7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.977595 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-t85kr" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.984690 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4chs\" (UniqueName: \"kubernetes.io/projected/b7250ef3-7d67-4e63-bd25-ab511d058ee7-kube-api-access-k4chs\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.984721 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.984731 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.984741 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:40 crc kubenswrapper[4943]: I1129 07:13:40.984750 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7250ef3-7d67-4e63-bd25-ab511d058ee7-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.088854 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-config-data\") pod \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.089347 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6d7x\" (UniqueName: \"kubernetes.io/projected/6e6b4461-55fa-4092-b3c4-bc414ea16f70-kube-api-access-t6d7x\") pod \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.089393 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-combined-ca-bundle\") pod \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.089456 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-db-sync-config-data\") pod \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\" (UID: \"6e6b4461-55fa-4092-b3c4-bc414ea16f70\") " Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.114750 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "6e6b4461-55fa-4092-b3c4-bc414ea16f70" (UID: "6e6b4461-55fa-4092-b3c4-bc414ea16f70"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.169896 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e6b4461-55fa-4092-b3c4-bc414ea16f70-kube-api-access-t6d7x" (OuterVolumeSpecName: "kube-api-access-t6d7x") pod "6e6b4461-55fa-4092-b3c4-bc414ea16f70" (UID: "6e6b4461-55fa-4092-b3c4-bc414ea16f70"). InnerVolumeSpecName "kube-api-access-t6d7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.181357 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e6b4461-55fa-4092-b3c4-bc414ea16f70" (UID: "6e6b4461-55fa-4092-b3c4-bc414ea16f70"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.192275 4943 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.192313 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6d7x\" (UniqueName: \"kubernetes.io/projected/6e6b4461-55fa-4092-b3c4-bc414ea16f70-kube-api-access-t6d7x\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.192325 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.218709 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-config-data" (OuterVolumeSpecName: "config-data") pod "6e6b4461-55fa-4092-b3c4-bc414ea16f70" (UID: "6e6b4461-55fa-4092-b3c4-bc414ea16f70"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.219112 4943 generic.go:334] "Generic (PLEG): container finished" podID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerID="8d28fa321e107de29e6ea9254180072cfd23149aca574b0e92d666f60fb19d32" exitCode=0 Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.219221 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ccaddac5-93ab-4df0-8b7e-a8195eca22c7","Type":"ContainerDied","Data":"8d28fa321e107de29e6ea9254180072cfd23149aca574b0e92d666f60fb19d32"} Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.225357 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-t85kr" event={"ID":"6e6b4461-55fa-4092-b3c4-bc414ea16f70","Type":"ContainerDied","Data":"40f248b26805b5734d88fb3101bd5496c166b9674becd15aa395c7c682cf2470"} Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.226233 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40f248b26805b5734d88fb3101bd5496c166b9674becd15aa395c7c682cf2470" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.225648 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-t85kr" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.229794 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" event={"ID":"b7250ef3-7d67-4e63-bd25-ab511d058ee7","Type":"ContainerDied","Data":"29183efa9c08ed2702a3c3565a7a2b369a3ef0b8da891980699bc21e1e8f2693"} Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.229847 4943 scope.go:117] "RemoveContainer" containerID="a363585a07ce3b1222b542c528d329a0bfa56a70d09a494b5ec4c2f8797488fb" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.229854 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c55bf9497-p8xzj" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.259901 4943 scope.go:117] "RemoveContainer" containerID="1f5f7fb2c6b73fea2d9b60237a42ff191ec6322d4d44afd1b1ebdee35af2ab2d" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.280117 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c55bf9497-p8xzj"] Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.290227 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c55bf9497-p8xzj"] Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.294899 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e6b4461-55fa-4092-b3c4-bc414ea16f70-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.367989 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" path="/var/lib/kubelet/pods/b7250ef3-7d67-4e63-bd25-ab511d058ee7/volumes" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.714886 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b76cdf485-lmsb6"] Nov 29 07:13:41 crc kubenswrapper[4943]: E1129 07:13:41.716127 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e6b4461-55fa-4092-b3c4-bc414ea16f70" containerName="glance-db-sync" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.716143 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e6b4461-55fa-4092-b3c4-bc414ea16f70" containerName="glance-db-sync" Nov 29 07:13:41 crc kubenswrapper[4943]: E1129 07:13:41.716155 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerName="init" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.716162 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerName="init" Nov 29 07:13:41 crc kubenswrapper[4943]: E1129 07:13:41.716192 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerName="dnsmasq-dns" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.716198 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerName="dnsmasq-dns" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.716399 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7250ef3-7d67-4e63-bd25-ab511d058ee7" containerName="dnsmasq-dns" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.716416 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e6b4461-55fa-4092-b3c4-bc414ea16f70" containerName="glance-db-sync" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.723646 4943 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.779178 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b76cdf485-lmsb6"] Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.804361 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l2kv\" (UniqueName: \"kubernetes.io/projected/49915ae8-7a1f-446e-804d-299765490b05-kube-api-access-7l2kv\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.804461 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-config\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.804499 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-sb\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.804527 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-dns-svc\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.804593 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-nb\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.906759 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l2kv\" (UniqueName: \"kubernetes.io/projected/49915ae8-7a1f-446e-804d-299765490b05-kube-api-access-7l2kv\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.906928 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-config\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.906959 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-sb\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.906995 4943 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-dns-svc\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.907048 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-nb\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.908119 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-nb\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.909032 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-config\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.909192 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hqj99"] Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.909678 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-sb\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.910351 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-dns-svc\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.910968 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.916885 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hqj99"] Nov 29 07:13:41 crc kubenswrapper[4943]: I1129 07:13:41.937675 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l2kv\" (UniqueName: \"kubernetes.io/projected/49915ae8-7a1f-446e-804d-299765490b05-kube-api-access-7l2kv\") pod \"dnsmasq-dns-5b76cdf485-lmsb6\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.008879 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-utilities\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.008934 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5xgg\" (UniqueName: \"kubernetes.io/projected/2c0bfd16-93aa-4f3f-882a-6333420ed038-kube-api-access-s5xgg\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.008989 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-catalog-content\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.068018 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.110798 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-utilities\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.110853 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5xgg\" (UniqueName: \"kubernetes.io/projected/2c0bfd16-93aa-4f3f-882a-6333420ed038-kube-api-access-s5xgg\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.110902 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-catalog-content\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.111431 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-catalog-content\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.111629 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-utilities\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.130528 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5xgg\" (UniqueName: \"kubernetes.io/projected/2c0bfd16-93aa-4f3f-882a-6333420ed038-kube-api-access-s5xgg\") pod \"community-operators-hqj99\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.238796 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.250050 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerStarted","Data":"42ff6ec336ec3791d775c4a96dc8e6c891a140b391f6953b015030271343a316"} Nov 29 07:13:42 crc kubenswrapper[4943]: I1129 07:13:42.879613 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hqj99"] Nov 29 07:13:42 crc kubenswrapper[4943]: W1129 07:13:42.880481 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0bfd16_93aa_4f3f_882a_6333420ed038.slice/crio-df3b554030836ee54a08729cdf585c87b81c37c2f75f083b9fdc513e265c9f0e WatchSource:0}: Error finding container df3b554030836ee54a08729cdf585c87b81c37c2f75f083b9fdc513e265c9f0e: Status 404 returned error can't find the container with id df3b554030836ee54a08729cdf585c87b81c37c2f75f083b9fdc513e265c9f0e Nov 29 07:13:43 crc kubenswrapper[4943]: I1129 07:13:43.001975 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b76cdf485-lmsb6"] Nov 29 07:13:43 crc kubenswrapper[4943]: I1129 07:13:43.260821 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerStarted","Data":"f00f0971bbc89c6f88d63ac13e428b2f8df898b4cbf4791b78e9b1f6e4d3b794"} Nov 29 07:13:43 crc kubenswrapper[4943]: I1129 07:13:43.262311 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hqj99" event={"ID":"2c0bfd16-93aa-4f3f-882a-6333420ed038","Type":"ContainerStarted","Data":"df3b554030836ee54a08729cdf585c87b81c37c2f75f083b9fdc513e265c9f0e"} Nov 29 07:13:43 crc kubenswrapper[4943]: I1129 07:13:43.263621 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" event={"ID":"49915ae8-7a1f-446e-804d-299765490b05","Type":"ContainerStarted","Data":"6b6a3f3f63ea94a0dc9b52c2b69accdcb355efe91e7f579fd154a3495487092d"} Nov 29 07:13:44 crc kubenswrapper[4943]: I1129 07:13:44.274295 4943 generic.go:334] "Generic (PLEG): container finished" podID="49915ae8-7a1f-446e-804d-299765490b05" containerID="5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4" exitCode=0 Nov 29 07:13:44 crc kubenswrapper[4943]: I1129 07:13:44.274445 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" event={"ID":"49915ae8-7a1f-446e-804d-299765490b05","Type":"ContainerDied","Data":"5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4"} Nov 29 07:13:44 crc kubenswrapper[4943]: I1129 07:13:44.276192 4943 generic.go:334] "Generic (PLEG): container finished" podID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerID="58ec7b3a955edf867629f34068146db24deba884d46981263c3bf36aabf9d42d" exitCode=0 Nov 29 07:13:44 crc kubenswrapper[4943]: I1129 07:13:44.276225 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hqj99" event={"ID":"2c0bfd16-93aa-4f3f-882a-6333420ed038","Type":"ContainerDied","Data":"58ec7b3a955edf867629f34068146db24deba884d46981263c3bf36aabf9d42d"} Nov 29 07:13:44 crc kubenswrapper[4943]: I1129 07:13:44.919592 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.289435 4943 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerStarted","Data":"71e8d81b6f92049c6a4febd96721a8aaeb8c8376445fa640351283e224e8b6ca"} Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.291792 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" event={"ID":"49915ae8-7a1f-446e-804d-299765490b05","Type":"ContainerStarted","Data":"9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d"} Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.291876 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.294229 4943 generic.go:334] "Generic (PLEG): container finished" podID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerID="48332a28964ad22e2231ac04b18d62d6743b9133533b12003a634f9db525b9ff" exitCode=0 Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.294261 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ccaddac5-93ab-4df0-8b7e-a8195eca22c7","Type":"ContainerDied","Data":"48332a28964ad22e2231ac04b18d62d6743b9133533b12003a634f9db525b9ff"} Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.318395 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" podStartSLOduration=4.31837547 podStartE2EDuration="4.31837547s" podCreationTimestamp="2025-11-29 07:13:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:13:45.310096166 +0000 UTC m=+2400.240184909" watchObservedRunningTime="2025-11-29 07:13:45.31837547 +0000 UTC m=+2400.248464223" Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.876823 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.988154 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxts8\" (UniqueName: \"kubernetes.io/projected/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-kube-api-access-qxts8\") pod \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.988232 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-etc-machine-id\") pod \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.988293 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-combined-ca-bundle\") pod \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.988386 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ccaddac5-93ab-4df0-8b7e-a8195eca22c7" (UID: "ccaddac5-93ab-4df0-8b7e-a8195eca22c7"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.988540 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-scripts\") pod \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.988558 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data\") pod \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.989130 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data-custom\") pod \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\" (UID: \"ccaddac5-93ab-4df0-8b7e-a8195eca22c7\") " Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.989468 4943 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.994140 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-scripts" (OuterVolumeSpecName: "scripts") pod "ccaddac5-93ab-4df0-8b7e-a8195eca22c7" (UID: "ccaddac5-93ab-4df0-8b7e-a8195eca22c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.994933 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ccaddac5-93ab-4df0-8b7e-a8195eca22c7" (UID: "ccaddac5-93ab-4df0-8b7e-a8195eca22c7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:45 crc kubenswrapper[4943]: I1129 07:13:45.995459 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-kube-api-access-qxts8" (OuterVolumeSpecName: "kube-api-access-qxts8") pod "ccaddac5-93ab-4df0-8b7e-a8195eca22c7" (UID: "ccaddac5-93ab-4df0-8b7e-a8195eca22c7"). InnerVolumeSpecName "kube-api-access-qxts8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.037497 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ccaddac5-93ab-4df0-8b7e-a8195eca22c7" (UID: "ccaddac5-93ab-4df0-8b7e-a8195eca22c7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.090882 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.090921 4943 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.090936 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxts8\" (UniqueName: \"kubernetes.io/projected/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-kube-api-access-qxts8\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.090950 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.095834 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data" (OuterVolumeSpecName: "config-data") pod "ccaddac5-93ab-4df0-8b7e-a8195eca22c7" (UID: "ccaddac5-93ab-4df0-8b7e-a8195eca22c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.192840 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccaddac5-93ab-4df0-8b7e-a8195eca22c7-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.306320 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.306339 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ccaddac5-93ab-4df0-8b7e-a8195eca22c7","Type":"ContainerDied","Data":"1ebf475b3364cc5c220f015bf5d18253542c1cf2d9faed8e2ad5d36580fab56f"} Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.306414 4943 scope.go:117] "RemoveContainer" containerID="8d28fa321e107de29e6ea9254180072cfd23149aca574b0e92d666f60fb19d32" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.332270 4943 scope.go:117] "RemoveContainer" containerID="48332a28964ad22e2231ac04b18d62d6743b9133533b12003a634f9db525b9ff" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.347517 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.361124 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.382725 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:46 crc kubenswrapper[4943]: E1129 07:13:46.383189 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerName="cinder-scheduler" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.383270 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerName="cinder-scheduler" Nov 29 07:13:46 crc kubenswrapper[4943]: E1129 07:13:46.383346 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerName="probe" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.383428 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerName="probe" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.383675 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerName="probe" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.383780 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" containerName="cinder-scheduler" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.384815 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.401313 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.443745 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.533814 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4jnm\" (UniqueName: \"kubernetes.io/projected/542148bb-e707-4112-bc40-5b2272056dd1-kube-api-access-z4jnm\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.533881 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.533932 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-scripts\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.533992 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/542148bb-e707-4112-bc40-5b2272056dd1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.534035 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.534346 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-config-data\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.636498 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-config-data\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.636617 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4jnm\" (UniqueName: \"kubernetes.io/projected/542148bb-e707-4112-bc40-5b2272056dd1-kube-api-access-z4jnm\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.636642 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.636660 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-scripts\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.636895 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/542148bb-e707-4112-bc40-5b2272056dd1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.636952 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.637387 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/542148bb-e707-4112-bc40-5b2272056dd1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.642798 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.642982 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.644125 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-config-data\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.647019 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/542148bb-e707-4112-bc40-5b2272056dd1-scripts\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 crc kubenswrapper[4943]: I1129 07:13:46.662607 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4jnm\" (UniqueName: \"kubernetes.io/projected/542148bb-e707-4112-bc40-5b2272056dd1-kube-api-access-z4jnm\") pod \"cinder-scheduler-0\" (UID: \"542148bb-e707-4112-bc40-5b2272056dd1\") " pod="openstack/cinder-scheduler-0" Nov 29 07:13:46 
crc kubenswrapper[4943]: I1129 07:13:46.724043 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.272166 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.319778 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"542148bb-e707-4112-bc40-5b2272056dd1","Type":"ContainerStarted","Data":"9aac3a7fdad59255a312a6a4f8594cd7b39094843f24025d5df83d7f99dcd7f1"} Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.323301 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerStarted","Data":"dc4b28761a3d357c2f2cd04fd1d0b710b7b94e48f5780e97c0d3ab242f1187b9"} Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.323449 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="ceilometer-central-agent" containerID="cri-o://42ff6ec336ec3791d775c4a96dc8e6c891a140b391f6953b015030271343a316" gracePeriod=30 Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.323742 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.323977 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="proxy-httpd" containerID="cri-o://dc4b28761a3d357c2f2cd04fd1d0b710b7b94e48f5780e97c0d3ab242f1187b9" gracePeriod=30 Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.324030 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="sg-core" containerID="cri-o://71e8d81b6f92049c6a4febd96721a8aaeb8c8376445fa640351283e224e8b6ca" gracePeriod=30 Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.324070 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="ceilometer-notification-agent" containerID="cri-o://f00f0971bbc89c6f88d63ac13e428b2f8df898b4cbf4791b78e9b1f6e4d3b794" gracePeriod=30 Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.342153 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccaddac5-93ab-4df0-8b7e-a8195eca22c7" path="/var/lib/kubelet/pods/ccaddac5-93ab-4df0-8b7e-a8195eca22c7/volumes" Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.351807 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.339578839 podStartE2EDuration="8.351785416s" podCreationTimestamp="2025-11-29 07:13:39 +0000 UTC" firstStartedPulling="2025-11-29 07:13:40.122591015 +0000 UTC m=+2395.052679768" lastFinishedPulling="2025-11-29 07:13:46.134797592 +0000 UTC m=+2401.064886345" observedRunningTime="2025-11-29 07:13:47.346704751 +0000 UTC m=+2402.276793514" watchObservedRunningTime="2025-11-29 07:13:47.351785416 +0000 UTC m=+2402.281874169" Nov 29 07:13:47 crc kubenswrapper[4943]: I1129 07:13:47.879772 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 
07:13:48.422936 4943 generic.go:334] "Generic (PLEG): container finished" podID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerID="dc4b28761a3d357c2f2cd04fd1d0b710b7b94e48f5780e97c0d3ab242f1187b9" exitCode=0 Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.423264 4943 generic.go:334] "Generic (PLEG): container finished" podID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerID="71e8d81b6f92049c6a4febd96721a8aaeb8c8376445fa640351283e224e8b6ca" exitCode=2 Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.423272 4943 generic.go:334] "Generic (PLEG): container finished" podID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerID="f00f0971bbc89c6f88d63ac13e428b2f8df898b4cbf4791b78e9b1f6e4d3b794" exitCode=0 Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.423278 4943 generic.go:334] "Generic (PLEG): container finished" podID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerID="42ff6ec336ec3791d775c4a96dc8e6c891a140b391f6953b015030271343a316" exitCode=0 Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.423231 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerDied","Data":"dc4b28761a3d357c2f2cd04fd1d0b710b7b94e48f5780e97c0d3ab242f1187b9"} Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.423369 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerDied","Data":"71e8d81b6f92049c6a4febd96721a8aaeb8c8376445fa640351283e224e8b6ca"} Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.423386 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerDied","Data":"f00f0971bbc89c6f88d63ac13e428b2f8df898b4cbf4791b78e9b1f6e4d3b794"} Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.423396 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerDied","Data":"42ff6ec336ec3791d775c4a96dc8e6c891a140b391f6953b015030271343a316"} Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.435464 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"542148bb-e707-4112-bc40-5b2272056dd1","Type":"ContainerStarted","Data":"ec8b69f0076919a58eaf6418fad77cb8cfbd712291520a45c9b8ecda52d882c1"} Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.603780 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697205 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-ceilometer-tls-certs\") pod \"3099f8bd-f2f1-4685-a4fd-73311457eac7\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697276 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-run-httpd\") pod \"3099f8bd-f2f1-4685-a4fd-73311457eac7\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697312 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-scripts\") pod \"3099f8bd-f2f1-4685-a4fd-73311457eac7\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697346 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-config-data\") pod \"3099f8bd-f2f1-4685-a4fd-73311457eac7\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697395 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n56nv\" (UniqueName: \"kubernetes.io/projected/3099f8bd-f2f1-4685-a4fd-73311457eac7-kube-api-access-n56nv\") pod \"3099f8bd-f2f1-4685-a4fd-73311457eac7\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697501 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-log-httpd\") pod \"3099f8bd-f2f1-4685-a4fd-73311457eac7\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697560 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-combined-ca-bundle\") pod \"3099f8bd-f2f1-4685-a4fd-73311457eac7\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697612 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-sg-core-conf-yaml\") pod \"3099f8bd-f2f1-4685-a4fd-73311457eac7\" (UID: \"3099f8bd-f2f1-4685-a4fd-73311457eac7\") " Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.697866 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3099f8bd-f2f1-4685-a4fd-73311457eac7" (UID: "3099f8bd-f2f1-4685-a4fd-73311457eac7"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.698301 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.701897 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3099f8bd-f2f1-4685-a4fd-73311457eac7" (UID: "3099f8bd-f2f1-4685-a4fd-73311457eac7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.702270 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3099f8bd-f2f1-4685-a4fd-73311457eac7-kube-api-access-n56nv" (OuterVolumeSpecName: "kube-api-access-n56nv") pod "3099f8bd-f2f1-4685-a4fd-73311457eac7" (UID: "3099f8bd-f2f1-4685-a4fd-73311457eac7"). InnerVolumeSpecName "kube-api-access-n56nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.706726 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-scripts" (OuterVolumeSpecName: "scripts") pod "3099f8bd-f2f1-4685-a4fd-73311457eac7" (UID: "3099f8bd-f2f1-4685-a4fd-73311457eac7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.739846 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3099f8bd-f2f1-4685-a4fd-73311457eac7" (UID: "3099f8bd-f2f1-4685-a4fd-73311457eac7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.762113 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3099f8bd-f2f1-4685-a4fd-73311457eac7" (UID: "3099f8bd-f2f1-4685-a4fd-73311457eac7"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.769810 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3099f8bd-f2f1-4685-a4fd-73311457eac7" (UID: "3099f8bd-f2f1-4685-a4fd-73311457eac7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.799739 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.799768 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n56nv\" (UniqueName: \"kubernetes.io/projected/3099f8bd-f2f1-4685-a4fd-73311457eac7-kube-api-access-n56nv\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.799778 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3099f8bd-f2f1-4685-a4fd-73311457eac7-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.799787 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.799795 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.799804 4943 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.822184 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-config-data" (OuterVolumeSpecName: "config-data") pod "3099f8bd-f2f1-4685-a4fd-73311457eac7" (UID: "3099f8bd-f2f1-4685-a4fd-73311457eac7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:48 crc kubenswrapper[4943]: I1129 07:13:48.901068 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3099f8bd-f2f1-4685-a4fd-73311457eac7-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.410042 4943 scope.go:117] "RemoveContainer" containerID="07109d17b0b24581bf2bcc89af945e803454f116c73a8d96b46ba426091ada09" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.452263 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3099f8bd-f2f1-4685-a4fd-73311457eac7","Type":"ContainerDied","Data":"2c4ba955a824113709ce10bc3f65ebc39dbd27bf7aa52542a427122dcd90088d"} Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.452631 4943 scope.go:117] "RemoveContainer" containerID="dc4b28761a3d357c2f2cd04fd1d0b710b7b94e48f5780e97c0d3ab242f1187b9" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.452751 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.461927 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"542148bb-e707-4112-bc40-5b2272056dd1","Type":"ContainerStarted","Data":"617822240a94d9f21bc81aabb45cc5dccf1b2e4a200b2a80a49bd8c045d75ae7"} Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.492019 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.491991987 podStartE2EDuration="3.491991987s" podCreationTimestamp="2025-11-29 07:13:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:13:49.484458681 +0000 UTC m=+2404.414547454" watchObservedRunningTime="2025-11-29 07:13:49.491991987 +0000 UTC m=+2404.422080740" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.503375 4943 scope.go:117] "RemoveContainer" containerID="71e8d81b6f92049c6a4febd96721a8aaeb8c8376445fa640351283e224e8b6ca" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.529423 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.565904 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.573788 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:49 crc kubenswrapper[4943]: E1129 07:13:49.574203 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="ceilometer-notification-agent" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.574220 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="ceilometer-notification-agent" Nov 29 07:13:49 crc kubenswrapper[4943]: E1129 07:13:49.574235 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="sg-core" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.574240 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="sg-core" Nov 29 07:13:49 crc kubenswrapper[4943]: E1129 07:13:49.574261 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="proxy-httpd" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.574267 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="proxy-httpd" Nov 29 07:13:49 crc kubenswrapper[4943]: E1129 07:13:49.574277 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="ceilometer-central-agent" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.574282 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="ceilometer-central-agent" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.574429 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="proxy-httpd" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.574447 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" 
containerName="ceilometer-notification-agent" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.574461 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="ceilometer-central-agent" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.574469 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" containerName="sg-core" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.576396 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.577686 4943 scope.go:117] "RemoveContainer" containerID="f00f0971bbc89c6f88d63ac13e428b2f8df898b4cbf4791b78e9b1f6e4d3b794" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.578549 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.579642 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.579837 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.638502 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.660532 4943 scope.go:117] "RemoveContainer" containerID="42ff6ec336ec3791d775c4a96dc8e6c891a140b391f6953b015030271343a316" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.717627 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.717700 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.717765 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-run-httpd\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.717800 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-config-data\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.717928 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 
07:13:49.717959 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-scripts\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.717996 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5nkk\" (UniqueName: \"kubernetes.io/projected/0a2a3e00-c497-473a-985a-f406a6a16604-kube-api-access-d5nkk\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.718055 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-log-httpd\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820161 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820211 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820245 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-run-httpd\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820265 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-config-data\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820308 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820335 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-scripts\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820356 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5nkk\" (UniqueName: \"kubernetes.io/projected/0a2a3e00-c497-473a-985a-f406a6a16604-kube-api-access-d5nkk\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " 
pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820390 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-log-httpd\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.820803 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-log-httpd\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.821150 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-run-httpd\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.826654 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.827826 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.833863 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-config-data\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.843788 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5nkk\" (UniqueName: \"kubernetes.io/projected/0a2a3e00-c497-473a-985a-f406a6a16604-kube-api-access-d5nkk\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.844045 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-scripts\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.851547 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.905948 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:13:49 crc kubenswrapper[4943]: I1129 07:13:49.941961 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:50 crc kubenswrapper[4943]: I1129 07:13:50.521609 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:13:51 crc kubenswrapper[4943]: I1129 07:13:51.340308 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3099f8bd-f2f1-4685-a4fd-73311457eac7" path="/var/lib/kubelet/pods/3099f8bd-f2f1-4685-a4fd-73311457eac7/volumes" Nov 29 07:13:51 crc kubenswrapper[4943]: I1129 07:13:51.483983 4943 generic.go:334] "Generic (PLEG): container finished" podID="68a1344d-4cce-4597-894c-f167c42efe84" containerID="391dce06ed9d044c50a78e5dfba711255d7f190bb557f7d231cee40b8757f07c" exitCode=0 Nov 29 07:13:51 crc kubenswrapper[4943]: I1129 07:13:51.484032 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" event={"ID":"68a1344d-4cce-4597-894c-f167c42efe84","Type":"ContainerDied","Data":"391dce06ed9d044c50a78e5dfba711255d7f190bb557f7d231cee40b8757f07c"} Nov 29 07:13:51 crc kubenswrapper[4943]: I1129 07:13:51.725737 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 29 07:13:52 crc kubenswrapper[4943]: I1129 07:13:52.070724 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:13:52 crc kubenswrapper[4943]: I1129 07:13:52.120602 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64b54fbc47-lgfqc"] Nov 29 07:13:52 crc kubenswrapper[4943]: I1129 07:13:52.120837 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" podUID="1738eb89-d861-49e6-aee5-ff918ce93bcb" containerName="dnsmasq-dns" containerID="cri-o://a39eb700b49b3db8da7aebf54a99c82cc728b4b7f759c33261a89dcb4b6d2c3b" gracePeriod=10 Nov 29 07:13:52 crc kubenswrapper[4943]: I1129 07:13:52.495014 4943 generic.go:334] "Generic (PLEG): container finished" podID="1738eb89-d861-49e6-aee5-ff918ce93bcb" containerID="a39eb700b49b3db8da7aebf54a99c82cc728b4b7f759c33261a89dcb4b6d2c3b" exitCode=0 Nov 29 07:13:52 crc kubenswrapper[4943]: I1129 07:13:52.495115 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" event={"ID":"1738eb89-d861-49e6-aee5-ff918ce93bcb","Type":"ContainerDied","Data":"a39eb700b49b3db8da7aebf54a99c82cc728b4b7f759c33261a89dcb4b6d2c3b"} Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.145395 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.297263 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgxs2\" (UniqueName: \"kubernetes.io/projected/68a1344d-4cce-4597-894c-f167c42efe84-kube-api-access-vgxs2\") pod \"68a1344d-4cce-4597-894c-f167c42efe84\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.298048 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-config-data\") pod \"68a1344d-4cce-4597-894c-f167c42efe84\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.298107 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-combined-ca-bundle\") pod \"68a1344d-4cce-4597-894c-f167c42efe84\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.298135 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-scripts\") pod \"68a1344d-4cce-4597-894c-f167c42efe84\" (UID: \"68a1344d-4cce-4597-894c-f167c42efe84\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.306783 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-scripts" (OuterVolumeSpecName: "scripts") pod "68a1344d-4cce-4597-894c-f167c42efe84" (UID: "68a1344d-4cce-4597-894c-f167c42efe84"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.307235 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68a1344d-4cce-4597-894c-f167c42efe84-kube-api-access-vgxs2" (OuterVolumeSpecName: "kube-api-access-vgxs2") pod "68a1344d-4cce-4597-894c-f167c42efe84" (UID: "68a1344d-4cce-4597-894c-f167c42efe84"). InnerVolumeSpecName "kube-api-access-vgxs2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.368018 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "68a1344d-4cce-4597-894c-f167c42efe84" (UID: "68a1344d-4cce-4597-894c-f167c42efe84"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.394225 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-config-data" (OuterVolumeSpecName: "config-data") pod "68a1344d-4cce-4597-894c-f167c42efe84" (UID: "68a1344d-4cce-4597-894c-f167c42efe84"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.402780 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.402813 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.402823 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgxs2\" (UniqueName: \"kubernetes.io/projected/68a1344d-4cce-4597-894c-f167c42efe84-kube-api-access-vgxs2\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.402835 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68a1344d-4cce-4597-894c-f167c42efe84-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.498721 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.506836 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" event={"ID":"1738eb89-d861-49e6-aee5-ff918ce93bcb","Type":"ContainerDied","Data":"b3b10ba398137fdd4a3e946670ae52a311f5322aeea56abdc47034e918c93770"} Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.506880 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64b54fbc47-lgfqc" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.506888 4943 scope.go:117] "RemoveContainer" containerID="a39eb700b49b3db8da7aebf54a99c82cc728b4b7f759c33261a89dcb4b6d2c3b" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.510736 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" event={"ID":"68a1344d-4cce-4597-894c-f167c42efe84","Type":"ContainerDied","Data":"bb0fb3944b85cb97e1325f4eda46cd2412a3d58176ae685103771d3e18946cf6"} Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.510773 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb0fb3944b85cb97e1325f4eda46cd2412a3d58176ae685103771d3e18946cf6" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.510838 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-ws7ms" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.517539 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerStarted","Data":"c612e579447d32617b95759bccd096240e8e47ac85642c63161173281b25716e"} Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.571359 4943 scope.go:117] "RemoveContainer" containerID="9aa2da276e13f5f47c85c3432cbbbd7687776f53ea532f9fa1bc223e1b4d129f" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.597573 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:13:53 crc kubenswrapper[4943]: E1129 07:13:53.598000 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1738eb89-d861-49e6-aee5-ff918ce93bcb" containerName="dnsmasq-dns" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.598014 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1738eb89-d861-49e6-aee5-ff918ce93bcb" containerName="dnsmasq-dns" Nov 29 07:13:53 crc kubenswrapper[4943]: E1129 07:13:53.598032 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68a1344d-4cce-4597-894c-f167c42efe84" containerName="nova-cell0-conductor-db-sync" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.598038 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="68a1344d-4cce-4597-894c-f167c42efe84" containerName="nova-cell0-conductor-db-sync" Nov 29 07:13:53 crc kubenswrapper[4943]: E1129 07:13:53.598052 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1738eb89-d861-49e6-aee5-ff918ce93bcb" containerName="init" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.598059 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1738eb89-d861-49e6-aee5-ff918ce93bcb" containerName="init" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.598246 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1738eb89-d861-49e6-aee5-ff918ce93bcb" containerName="dnsmasq-dns" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.598275 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="68a1344d-4cce-4597-894c-f167c42efe84" containerName="nova-cell0-conductor-db-sync" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.598935 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.605654 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-sb\") pod \"1738eb89-d861-49e6-aee5-ff918ce93bcb\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.605721 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-nb\") pod \"1738eb89-d861-49e6-aee5-ff918ce93bcb\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.605759 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-dns-svc\") pod \"1738eb89-d861-49e6-aee5-ff918ce93bcb\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.605796 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plqfv\" (UniqueName: \"kubernetes.io/projected/1738eb89-d861-49e6-aee5-ff918ce93bcb-kube-api-access-plqfv\") pod \"1738eb89-d861-49e6-aee5-ff918ce93bcb\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.605976 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-config\") pod \"1738eb89-d861-49e6-aee5-ff918ce93bcb\" (UID: \"1738eb89-d861-49e6-aee5-ff918ce93bcb\") " Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.608197 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-2hxgx" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.608398 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.616478 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.638388 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1738eb89-d861-49e6-aee5-ff918ce93bcb-kube-api-access-plqfv" (OuterVolumeSpecName: "kube-api-access-plqfv") pod "1738eb89-d861-49e6-aee5-ff918ce93bcb" (UID: "1738eb89-d861-49e6-aee5-ff918ce93bcb"). InnerVolumeSpecName "kube-api-access-plqfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.692937 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1738eb89-d861-49e6-aee5-ff918ce93bcb" (UID: "1738eb89-d861-49e6-aee5-ff918ce93bcb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.695313 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1738eb89-d861-49e6-aee5-ff918ce93bcb" (UID: "1738eb89-d861-49e6-aee5-ff918ce93bcb"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.696095 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-config" (OuterVolumeSpecName: "config") pod "1738eb89-d861-49e6-aee5-ff918ce93bcb" (UID: "1738eb89-d861-49e6-aee5-ff918ce93bcb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.696224 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1738eb89-d861-49e6-aee5-ff918ce93bcb" (UID: "1738eb89-d861-49e6-aee5-ff918ce93bcb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.707919 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhc7w\" (UniqueName: \"kubernetes.io/projected/d34120b5-7d6c-4fc5-a431-45bcfac00231-kube-api-access-rhc7w\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.708001 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.708031 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.708086 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.708101 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.708115 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.708125 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1738eb89-d861-49e6-aee5-ff918ce93bcb-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.708138 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plqfv\" (UniqueName: \"kubernetes.io/projected/1738eb89-d861-49e6-aee5-ff918ce93bcb-kube-api-access-plqfv\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.809423 4943 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.809474 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.809611 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhc7w\" (UniqueName: \"kubernetes.io/projected/d34120b5-7d6c-4fc5-a431-45bcfac00231-kube-api-access-rhc7w\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.813985 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.817376 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.836380 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhc7w\" (UniqueName: \"kubernetes.io/projected/d34120b5-7d6c-4fc5-a431-45bcfac00231-kube-api-access-rhc7w\") pod \"nova-cell0-conductor-0\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.849282 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64b54fbc47-lgfqc"] Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.858994 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64b54fbc47-lgfqc"] Nov 29 07:13:53 crc kubenswrapper[4943]: I1129 07:13:53.927036 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.382677 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:13:54 crc kubenswrapper[4943]: W1129 07:13:54.383896 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd34120b5_7d6c_4fc5_a431_45bcfac00231.slice/crio-4d269cc661492267c954ff217cde78e3074497c3502e3df5d5eaa77dd0ef7307 WatchSource:0}: Error finding container 4d269cc661492267c954ff217cde78e3074497c3502e3df5d5eaa77dd0ef7307: Status 404 returned error can't find the container with id 4d269cc661492267c954ff217cde78e3074497c3502e3df5d5eaa77dd0ef7307 Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.540481 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerStarted","Data":"7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316"} Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.540910 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerStarted","Data":"c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398"} Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.543467 4943 generic.go:334] "Generic (PLEG): container finished" podID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerID="e90bf521231c4a745f16d2330c5c1c34ab4eb1574ba61bc6b55dc12b9dd01ab1" exitCode=0 Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.543536 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hqj99" event={"ID":"2c0bfd16-93aa-4f3f-882a-6333420ed038","Type":"ContainerDied","Data":"e90bf521231c4a745f16d2330c5c1c34ab4eb1574ba61bc6b55dc12b9dd01ab1"} Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.548081 4943 generic.go:334] "Generic (PLEG): container finished" podID="157d6f54-3436-40f4-b2ef-c16933cbbc72" containerID="4ffeada44816bc9d2238de419bbe56d443a99b305ef92427a376d41d563da45b" exitCode=0 Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.548154 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-6wspw" event={"ID":"157d6f54-3436-40f4-b2ef-c16933cbbc72","Type":"ContainerDied","Data":"4ffeada44816bc9d2238de419bbe56d443a99b305ef92427a376d41d563da45b"} Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.552813 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d34120b5-7d6c-4fc5-a431-45bcfac00231","Type":"ContainerStarted","Data":"4fc53b76db4c87e7a48a703ab7e0aaa7c17895117a9d229a83d1c92eb70d969c"} Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.553687 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.553712 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d34120b5-7d6c-4fc5-a431-45bcfac00231","Type":"ContainerStarted","Data":"4d269cc661492267c954ff217cde78e3074497c3502e3df5d5eaa77dd0ef7307"} Nov 29 07:13:54 crc kubenswrapper[4943]: I1129 07:13:54.587180 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.5871562890000002 podStartE2EDuration="1.587156289s" 
podCreationTimestamp="2025-11-29 07:13:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:13:54.579756266 +0000 UTC m=+2409.509845029" watchObservedRunningTime="2025-11-29 07:13:54.587156289 +0000 UTC m=+2409.517245032" Nov 29 07:13:55 crc kubenswrapper[4943]: I1129 07:13:55.350958 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1738eb89-d861-49e6-aee5-ff918ce93bcb" path="/var/lib/kubelet/pods/1738eb89-d861-49e6-aee5-ff918ce93bcb/volumes" Nov 29 07:13:55 crc kubenswrapper[4943]: I1129 07:13:55.929341 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-6wspw" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.061855 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfnnt\" (UniqueName: \"kubernetes.io/projected/157d6f54-3436-40f4-b2ef-c16933cbbc72-kube-api-access-bfnnt\") pod \"157d6f54-3436-40f4-b2ef-c16933cbbc72\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.062019 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-config\") pod \"157d6f54-3436-40f4-b2ef-c16933cbbc72\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.062200 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-combined-ca-bundle\") pod \"157d6f54-3436-40f4-b2ef-c16933cbbc72\" (UID: \"157d6f54-3436-40f4-b2ef-c16933cbbc72\") " Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.067618 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/157d6f54-3436-40f4-b2ef-c16933cbbc72-kube-api-access-bfnnt" (OuterVolumeSpecName: "kube-api-access-bfnnt") pod "157d6f54-3436-40f4-b2ef-c16933cbbc72" (UID: "157d6f54-3436-40f4-b2ef-c16933cbbc72"). InnerVolumeSpecName "kube-api-access-bfnnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.088779 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "157d6f54-3436-40f4-b2ef-c16933cbbc72" (UID: "157d6f54-3436-40f4-b2ef-c16933cbbc72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.102292 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-config" (OuterVolumeSpecName: "config") pod "157d6f54-3436-40f4-b2ef-c16933cbbc72" (UID: "157d6f54-3436-40f4-b2ef-c16933cbbc72"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.164667 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfnnt\" (UniqueName: \"kubernetes.io/projected/157d6f54-3436-40f4-b2ef-c16933cbbc72-kube-api-access-bfnnt\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.164714 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.164724 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157d6f54-3436-40f4-b2ef-c16933cbbc72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.571270 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-6wspw" event={"ID":"157d6f54-3436-40f4-b2ef-c16933cbbc72","Type":"ContainerDied","Data":"6df441416f30a79da6e5587ae80b2e0c45097addf8e92af261c8da8516fbfa8e"} Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.571595 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6df441416f30a79da6e5587ae80b2e0c45097addf8e92af261c8da8516fbfa8e" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.571441 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-6wspw" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.844634 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-68vzd"] Nov 29 07:13:56 crc kubenswrapper[4943]: E1129 07:13:56.845049 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="157d6f54-3436-40f4-b2ef-c16933cbbc72" containerName="neutron-db-sync" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.845065 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="157d6f54-3436-40f4-b2ef-c16933cbbc72" containerName="neutron-db-sync" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.845319 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="157d6f54-3436-40f4-b2ef-c16933cbbc72" containerName="neutron-db-sync" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.846357 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.857484 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-68vzd"] Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.958178 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7d6765c5fb-nmqsz"] Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.963258 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.976743 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-fnq66" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.977335 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.977368 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.977679 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.980776 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7d6765c5fb-nmqsz"] Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.988155 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-config\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.988223 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.988266 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd4m5\" (UniqueName: \"kubernetes.io/projected/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-kube-api-access-bd4m5\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.988296 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:56 crc kubenswrapper[4943]: I1129 07:13:56.988317 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089264 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klb2m\" (UniqueName: \"kubernetes.io/projected/922c84d9-bb4a-4834-a07e-011c2d9cec4d-kube-api-access-klb2m\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089313 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-combined-ca-bundle\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089338 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-config\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089369 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089479 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089689 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-ovndb-tls-certs\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089760 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-httpd-config\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089795 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-config\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089904 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.089994 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd4m5\" (UniqueName: \"kubernetes.io/projected/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-kube-api-access-bd4m5\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.090149 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.090751 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.090848 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-config\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.091124 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.109392 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd4m5\" (UniqueName: \"kubernetes.io/projected/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-kube-api-access-bd4m5\") pod \"dnsmasq-dns-6d97fcdd8f-68vzd\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.169882 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.196036 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klb2m\" (UniqueName: \"kubernetes.io/projected/922c84d9-bb4a-4834-a07e-011c2d9cec4d-kube-api-access-klb2m\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.196091 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-combined-ca-bundle\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.196107 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-config\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.196169 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-ovndb-tls-certs\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.196229 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-httpd-config\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.205250 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-config\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.206219 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-combined-ca-bundle\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.206970 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-httpd-config\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.209913 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-ovndb-tls-certs\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.219211 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-klb2m\" (UniqueName: \"kubernetes.io/projected/922c84d9-bb4a-4834-a07e-011c2d9cec4d-kube-api-access-klb2m\") pod \"neutron-7d6765c5fb-nmqsz\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") " pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.287079 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.360975 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 29 07:13:57 crc kubenswrapper[4943]: I1129 07:13:57.718950 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-68vzd"] Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.014962 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7d6765c5fb-nmqsz"] Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.615995 4943 generic.go:334] "Generic (PLEG): container finished" podID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" containerID="3a3e83868e2c960e2a4944de6d844be230c6fef614ff2782108accea3670e54b" exitCode=0 Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.616615 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" event={"ID":"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6","Type":"ContainerDied","Data":"3a3e83868e2c960e2a4944de6d844be230c6fef614ff2782108accea3670e54b"} Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.616641 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" event={"ID":"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6","Type":"ContainerStarted","Data":"221ae234d08a773798e2eaad365a4b533dd3bd06d62b8ca2bbdd8156025982dd"} Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.653802 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d6765c5fb-nmqsz" event={"ID":"922c84d9-bb4a-4834-a07e-011c2d9cec4d","Type":"ContainerStarted","Data":"37a6b2f8e99fb71f260e794a4a34944b4b9b615e2fa46270abe2a04a46405a3e"} Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.654052 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d6765c5fb-nmqsz" event={"ID":"922c84d9-bb4a-4834-a07e-011c2d9cec4d","Type":"ContainerStarted","Data":"b8feccb9ea48b2b34fde3ff96a5ee97fee732739981e6fb34b8fb1452116b56e"} Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.681230 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerStarted","Data":"1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485"} Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.711992 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hqj99" event={"ID":"2c0bfd16-93aa-4f3f-882a-6333420ed038","Type":"ContainerStarted","Data":"df493a34249296dfb8d0ffdb0a33e5e403eb9ca415707f05a0ab5c785d236d0d"} Nov 29 07:13:58 crc kubenswrapper[4943]: I1129 07:13:58.831390 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hqj99" podStartSLOduration=4.929943147 podStartE2EDuration="17.831354766s" podCreationTimestamp="2025-11-29 07:13:41 +0000 UTC" firstStartedPulling="2025-11-29 07:13:44.51783257 +0000 UTC m=+2399.447921323" lastFinishedPulling="2025-11-29 07:13:57.419244199 +0000 UTC m=+2412.349332942" 
observedRunningTime="2025-11-29 07:13:58.779294292 +0000 UTC m=+2413.709383045" watchObservedRunningTime="2025-11-29 07:13:58.831354766 +0000 UTC m=+2413.761443519" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.730634 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" event={"ID":"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6","Type":"ContainerStarted","Data":"16572ee100e338426e5ebf2d681db7cc64e71e138f5a43f44bc33c9c1ecbbacb"} Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.731045 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.739941 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d6765c5fb-nmqsz" event={"ID":"922c84d9-bb4a-4834-a07e-011c2d9cec4d","Type":"ContainerStarted","Data":"3cb8bc656cc4f81b6a6eda2bcdc07ecb2d6450c4fa4b59efec3c5615836bb535"} Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.739976 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7d6765c5fb-nmqsz" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.769370 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" podStartSLOduration=3.769349838 podStartE2EDuration="3.769349838s" podCreationTimestamp="2025-11-29 07:13:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:13:59.76013196 +0000 UTC m=+2414.690220723" watchObservedRunningTime="2025-11-29 07:13:59.769349838 +0000 UTC m=+2414.699438591" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.794532 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7d6765c5fb-nmqsz" podStartSLOduration=3.794513758 podStartE2EDuration="3.794513758s" podCreationTimestamp="2025-11-29 07:13:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:13:59.77552941 +0000 UTC m=+2414.705618163" watchObservedRunningTime="2025-11-29 07:13:59.794513758 +0000 UTC m=+2414.724602511" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.844091 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-64478cdc57-fhzpm"] Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.845427 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.847361 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.850017 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.861306 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-public-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.861382 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-internal-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.861542 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-httpd-config\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.861662 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfx72\" (UniqueName: \"kubernetes.io/projected/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-kube-api-access-jfx72\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.861732 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-combined-ca-bundle\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.861843 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-config\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.861866 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-ovndb-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.867976 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64478cdc57-fhzpm"] Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.963883 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-httpd-config\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.963955 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfx72\" (UniqueName: \"kubernetes.io/projected/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-kube-api-access-jfx72\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.963994 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-combined-ca-bundle\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.964032 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-config\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.964050 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-ovndb-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.964116 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-public-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.964152 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-internal-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.970294 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-internal-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.974093 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-httpd-config\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.974937 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-ovndb-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: 
\"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.988434 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-public-tls-certs\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.988530 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-config\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.988887 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfx72\" (UniqueName: \"kubernetes.io/projected/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-kube-api-access-jfx72\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:13:59 crc kubenswrapper[4943]: I1129 07:13:59.989658 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd-combined-ca-bundle\") pod \"neutron-64478cdc57-fhzpm\" (UID: \"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd\") " pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.164069 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.751443 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="ceilometer-central-agent" containerID="cri-o://c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398" gracePeriod=30 Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.752257 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerStarted","Data":"275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d"} Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.752862 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.753196 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="proxy-httpd" containerID="cri-o://275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d" gracePeriod=30 Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.753274 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="sg-core" containerID="cri-o://1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485" gracePeriod=30 Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.753324 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="ceilometer-notification-agent" 
containerID="cri-o://7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316" gracePeriod=30 Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.773278 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64478cdc57-fhzpm"] Nov 29 07:14:00 crc kubenswrapper[4943]: W1129 07:14:00.781827 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ff8ee17_b659_4a6e_8a68_ae4e4e6610cd.slice/crio-08180f7b881ee1de19a24b1392425f1c9395d03f6fe2bb16f8b7e6e0c0845949 WatchSource:0}: Error finding container 08180f7b881ee1de19a24b1392425f1c9395d03f6fe2bb16f8b7e6e0c0845949: Status 404 returned error can't find the container with id 08180f7b881ee1de19a24b1392425f1c9395d03f6fe2bb16f8b7e6e0c0845949 Nov 29 07:14:00 crc kubenswrapper[4943]: I1129 07:14:00.797979 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.191005085 podStartE2EDuration="11.797958994s" podCreationTimestamp="2025-11-29 07:13:49 +0000 UTC" firstStartedPulling="2025-11-29 07:13:53.051049532 +0000 UTC m=+2407.981138295" lastFinishedPulling="2025-11-29 07:13:59.658003461 +0000 UTC m=+2414.588092204" observedRunningTime="2025-11-29 07:14:00.794246322 +0000 UTC m=+2415.724335095" watchObservedRunningTime="2025-11-29 07:14:00.797958994 +0000 UTC m=+2415.728047747" Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.763108 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-64478cdc57-fhzpm" Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.764248 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64478cdc57-fhzpm" event={"ID":"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd","Type":"ContainerStarted","Data":"934504aed1e83ec38105834b8fc550b80fc3cc446b163d10a0f3ee67e2cfbdde"} Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.764276 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64478cdc57-fhzpm" event={"ID":"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd","Type":"ContainerStarted","Data":"97fba55b22cbb25f93e1863b636d6417236c15558f516b3b52887ae6e9500582"} Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.764290 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64478cdc57-fhzpm" event={"ID":"1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd","Type":"ContainerStarted","Data":"08180f7b881ee1de19a24b1392425f1c9395d03f6fe2bb16f8b7e6e0c0845949"} Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.769393 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerDied","Data":"275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d"} Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.769352 4943 generic.go:334] "Generic (PLEG): container finished" podID="0a2a3e00-c497-473a-985a-f406a6a16604" containerID="275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d" exitCode=0 Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.769485 4943 generic.go:334] "Generic (PLEG): container finished" podID="0a2a3e00-c497-473a-985a-f406a6a16604" containerID="1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485" exitCode=2 Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.769493 4943 generic.go:334] "Generic (PLEG): container finished" podID="0a2a3e00-c497-473a-985a-f406a6a16604" 
containerID="7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316" exitCode=0 Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.769508 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerDied","Data":"1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485"} Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.769520 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerDied","Data":"7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316"} Nov 29 07:14:01 crc kubenswrapper[4943]: I1129 07:14:01.791165 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-64478cdc57-fhzpm" podStartSLOduration=2.791138157 podStartE2EDuration="2.791138157s" podCreationTimestamp="2025-11-29 07:13:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:01.778409453 +0000 UTC m=+2416.708498206" watchObservedRunningTime="2025-11-29 07:14:01.791138157 +0000 UTC m=+2416.721226910" Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.239030 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.239380 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.613047 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.613409 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.613528 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.614415 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.614595 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" gracePeriod=600 Nov 29 07:14:02 crc kubenswrapper[4943]: E1129 07:14:02.734929 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.790636 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"} Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.790705 4943 scope.go:117] "RemoveContainer" containerID="74f5fd4bd09ec071509bc04dfa178bb8764e6d0b7f45e141ac761bcd13f81c65" Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.790643 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" exitCode=0 Nov 29 07:14:02 crc kubenswrapper[4943]: I1129 07:14:02.791380 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:14:02 crc kubenswrapper[4943]: E1129 07:14:02.791685 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.289237 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-hqj99" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="registry-server" probeResult="failure" output=< Nov 29 07:14:03 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 07:14:03 crc kubenswrapper[4943]: > Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.610868 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.747387 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-ceilometer-tls-certs\") pod \"0a2a3e00-c497-473a-985a-f406a6a16604\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.747443 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-config-data\") pod \"0a2a3e00-c497-473a-985a-f406a6a16604\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.747468 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-log-httpd\") pod \"0a2a3e00-c497-473a-985a-f406a6a16604\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.747535 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5nkk\" (UniqueName: \"kubernetes.io/projected/0a2a3e00-c497-473a-985a-f406a6a16604-kube-api-access-d5nkk\") pod \"0a2a3e00-c497-473a-985a-f406a6a16604\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.747557 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-scripts\") pod \"0a2a3e00-c497-473a-985a-f406a6a16604\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.747641 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-sg-core-conf-yaml\") pod \"0a2a3e00-c497-473a-985a-f406a6a16604\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.747682 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-run-httpd\") pod \"0a2a3e00-c497-473a-985a-f406a6a16604\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.747700 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-combined-ca-bundle\") pod \"0a2a3e00-c497-473a-985a-f406a6a16604\" (UID: \"0a2a3e00-c497-473a-985a-f406a6a16604\") " Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.748922 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0a2a3e00-c497-473a-985a-f406a6a16604" (UID: "0a2a3e00-c497-473a-985a-f406a6a16604"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.748942 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0a2a3e00-c497-473a-985a-f406a6a16604" (UID: "0a2a3e00-c497-473a-985a-f406a6a16604"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.755320 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-scripts" (OuterVolumeSpecName: "scripts") pod "0a2a3e00-c497-473a-985a-f406a6a16604" (UID: "0a2a3e00-c497-473a-985a-f406a6a16604"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.756069 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a2a3e00-c497-473a-985a-f406a6a16604-kube-api-access-d5nkk" (OuterVolumeSpecName: "kube-api-access-d5nkk") pod "0a2a3e00-c497-473a-985a-f406a6a16604" (UID: "0a2a3e00-c497-473a-985a-f406a6a16604"). InnerVolumeSpecName "kube-api-access-d5nkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.777647 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0a2a3e00-c497-473a-985a-f406a6a16604" (UID: "0a2a3e00-c497-473a-985a-f406a6a16604"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.801703 4943 generic.go:334] "Generic (PLEG): container finished" podID="0a2a3e00-c497-473a-985a-f406a6a16604" containerID="c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398" exitCode=0 Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.801862 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.802400 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerDied","Data":"c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398"} Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.802443 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a2a3e00-c497-473a-985a-f406a6a16604","Type":"ContainerDied","Data":"c612e579447d32617b95759bccd096240e8e47ac85642c63161173281b25716e"} Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.802485 4943 scope.go:117] "RemoveContainer" containerID="275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.809679 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "0a2a3e00-c497-473a-985a-f406a6a16604" (UID: "0a2a3e00-c497-473a-985a-f406a6a16604"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.831245 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a2a3e00-c497-473a-985a-f406a6a16604" (UID: "0a2a3e00-c497-473a-985a-f406a6a16604"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.851054 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.851229 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.851241 4943 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.851263 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a2a3e00-c497-473a-985a-f406a6a16604-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.851275 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5nkk\" (UniqueName: \"kubernetes.io/projected/0a2a3e00-c497-473a-985a-f406a6a16604-kube-api-access-d5nkk\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.851287 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.851296 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.866425 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-config-data" (OuterVolumeSpecName: "config-data") pod "0a2a3e00-c497-473a-985a-f406a6a16604" (UID: "0a2a3e00-c497-473a-985a-f406a6a16604"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.941447 4943 scope.go:117] "RemoveContainer" containerID="1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.953328 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a2a3e00-c497-473a-985a-f406a6a16604-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.956322 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.962400 4943 scope.go:117] "RemoveContainer" containerID="7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316" Nov 29 07:14:03 crc kubenswrapper[4943]: I1129 07:14:03.984718 4943 scope.go:117] "RemoveContainer" containerID="c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.005782 4943 scope.go:117] "RemoveContainer" containerID="275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d" Nov 29 07:14:04 crc kubenswrapper[4943]: E1129 07:14:04.006317 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d\": container with ID starting with 275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d not found: ID does not exist" containerID="275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.006360 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d"} err="failed to get container status \"275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d\": rpc error: code = NotFound desc = could not find container \"275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d\": container with ID starting with 275322a8727f3e446345ccc9c3359e13304e8f96a57be7053aadd47da5bd282d not found: ID does not exist" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.006388 4943 scope.go:117] "RemoveContainer" containerID="1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485" Nov 29 07:14:04 crc kubenswrapper[4943]: E1129 07:14:04.006794 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485\": container with ID starting with 1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485 not found: ID does not exist" containerID="1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.006821 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485"} err="failed to get container status \"1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485\": rpc error: code = NotFound desc = could not find container \"1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485\": container with ID starting with 1e925d5b09355e7bd8f8b1f29418100507c6fc71ae60751e66728e346947e485 not found: ID does not exist" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 
07:14:04.006845 4943 scope.go:117] "RemoveContainer" containerID="7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316" Nov 29 07:14:04 crc kubenswrapper[4943]: E1129 07:14:04.007209 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316\": container with ID starting with 7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316 not found: ID does not exist" containerID="7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.007236 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316"} err="failed to get container status \"7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316\": rpc error: code = NotFound desc = could not find container \"7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316\": container with ID starting with 7c134765ffe762897db45bd4487592f3e7717d72ab1fc4b6d6d643e531ebd316 not found: ID does not exist" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.007249 4943 scope.go:117] "RemoveContainer" containerID="c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398" Nov 29 07:14:04 crc kubenswrapper[4943]: E1129 07:14:04.007628 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398\": container with ID starting with c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398 not found: ID does not exist" containerID="c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.007687 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398"} err="failed to get container status \"c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398\": rpc error: code = NotFound desc = could not find container \"c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398\": container with ID starting with c80f400d308ba3408c091a3389ea7d5f704ef19259c4d8fdf2b114088d1cb398 not found: ID does not exist" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.141372 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.148357 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.172151 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:04 crc kubenswrapper[4943]: E1129 07:14:04.172607 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="ceilometer-notification-agent" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.172633 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="ceilometer-notification-agent" Nov 29 07:14:04 crc kubenswrapper[4943]: E1129 07:14:04.172658 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="sg-core" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.172669 
4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="sg-core" Nov 29 07:14:04 crc kubenswrapper[4943]: E1129 07:14:04.172690 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="ceilometer-central-agent" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.172698 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="ceilometer-central-agent" Nov 29 07:14:04 crc kubenswrapper[4943]: E1129 07:14:04.172719 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="proxy-httpd" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.172727 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="proxy-httpd" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.172961 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="proxy-httpd" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.172984 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="ceilometer-notification-agent" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.173009 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="sg-core" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.173040 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" containerName="ceilometer-central-agent" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.175465 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.179225 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.187677 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.187718 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.192127 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.259637 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-log-httpd\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.259709 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.259789 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wffs2\" (UniqueName: \"kubernetes.io/projected/73439687-a401-401c-bbbf-48de7fed3a51-kube-api-access-wffs2\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.259874 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.259908 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-config-data\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.259944 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-run-httpd\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.259989 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.260041 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-scripts\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.361653 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.361941 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-config-data\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.361980 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-run-httpd\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.362018 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.362065 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-scripts\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.362097 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-log-httpd\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.362123 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.362181 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wffs2\" (UniqueName: \"kubernetes.io/projected/73439687-a401-401c-bbbf-48de7fed3a51-kube-api-access-wffs2\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.362636 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-run-httpd\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.362893 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-log-httpd\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.367986 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.368280 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.368796 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.370682 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-config-data\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.371661 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-scripts\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.383522 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wffs2\" (UniqueName: \"kubernetes.io/projected/73439687-a401-401c-bbbf-48de7fed3a51-kube-api-access-wffs2\") pod \"ceilometer-0\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.490897 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-sf2xk"] Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.492195 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.494547 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.494787 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.499179 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-sf2xk"] Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.507794 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.565067 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.565199 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg579\" (UniqueName: \"kubernetes.io/projected/e88bdece-8370-4e3e-9127-a932d4452f5b-kube-api-access-dg579\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.565370 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-scripts\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.565499 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-config-data\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.667415 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.667500 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg579\" (UniqueName: \"kubernetes.io/projected/e88bdece-8370-4e3e-9127-a932d4452f5b-kube-api-access-dg579\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.667597 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-scripts\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.667714 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-config-data\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.674377 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-config-data\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: 
\"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.674491 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.679036 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-scripts\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.706941 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg579\" (UniqueName: \"kubernetes.io/projected/e88bdece-8370-4e3e-9127-a932d4452f5b-kube-api-access-dg579\") pod \"nova-cell0-cell-mapping-sf2xk\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.724803 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.726341 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.733061 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.759776 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.843272 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.905062 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.905138 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-config-data\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.905212 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm4js\" (UniqueName: \"kubernetes.io/projected/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-kube-api-access-wm4js\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.905242 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-logs\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.917818 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.921841 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.935911 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 29 07:14:04 crc kubenswrapper[4943]: I1129 07:14:04.982168 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.006551 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.006616 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-config-data\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.006643 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4qjv\" (UniqueName: \"kubernetes.io/projected/57760419-372d-4b83-b5d3-f7f6afc6a5c7-kube-api-access-q4qjv\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.006691 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm4js\" (UniqueName: \"kubernetes.io/projected/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-kube-api-access-wm4js\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.006714 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-logs\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.006754 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57760419-372d-4b83-b5d3-f7f6afc6a5c7-logs\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.006771 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.006911 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-config-data\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.017439 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.017874 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-logs\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.018900 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.024013 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.029422 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.037634 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.043984 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-config-data\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.053991 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm4js\" (UniqueName: \"kubernetes.io/projected/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-kube-api-access-wm4js\") pod \"nova-api-0\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.062860 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-68vzd"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.063112 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" podUID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" containerName="dnsmasq-dns" containerID="cri-o://16572ee100e338426e5ebf2d681db7cc64e71e138f5a43f44bc33c9c1ecbbacb" gracePeriod=10 Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.071340 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.072546 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.075142 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.079305 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.082174 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.106189 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-7hs8x"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.109908 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.110888 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.110944 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-config-data\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.110972 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vc42\" (UniqueName: \"kubernetes.io/projected/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-kube-api-access-2vc42\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.111004 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4qjv\" (UniqueName: \"kubernetes.io/projected/57760419-372d-4b83-b5d3-f7f6afc6a5c7-kube-api-access-q4qjv\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.111056 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.111078 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.111109 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57760419-372d-4b83-b5d3-f7f6afc6a5c7-logs\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.111127 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.111151 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-config-data\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.111179 4943 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwcgd\" (UniqueName: \"kubernetes.io/projected/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-kube-api-access-xwcgd\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.111957 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57760419-372d-4b83-b5d3-f7f6afc6a5c7-logs\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.115788 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.123881 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-config-data\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.143091 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-7hs8x"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.147675 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4qjv\" (UniqueName: \"kubernetes.io/projected/57760419-372d-4b83-b5d3-f7f6afc6a5c7-kube-api-access-q4qjv\") pod \"nova-metadata-0\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.165110 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212520 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212580 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212616 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212709 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212730 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjtlf\" (UniqueName: \"kubernetes.io/projected/4c770ec3-c23e-4afe-accb-3ad3b4aded53-kube-api-access-zjtlf\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212755 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwcgd\" (UniqueName: \"kubernetes.io/projected/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-kube-api-access-xwcgd\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212790 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-dns-svc\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212817 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-config\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212857 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " 
pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212887 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-config-data\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.212911 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vc42\" (UniqueName: \"kubernetes.io/projected/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-kube-api-access-2vc42\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.218420 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.224317 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.235930 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-config-data\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.257192 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9s2lw"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.273406 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.278330 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.285431 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwcgd\" (UniqueName: \"kubernetes.io/projected/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-kube-api-access-xwcgd\") pod \"nova-scheduler-0\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.286607 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9s2lw"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.298295 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vc42\" (UniqueName: \"kubernetes.io/projected/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-kube-api-access-2vc42\") pod \"nova-cell1-novncproxy-0\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.301219 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.314717 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-utilities\") pod \"certified-operators-9s2lw\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.321886 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.322061 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-catalog-content\") pod \"certified-operators-9s2lw\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.322110 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.322132 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjtlf\" (UniqueName: \"kubernetes.io/projected/4c770ec3-c23e-4afe-accb-3ad3b4aded53-kube-api-access-zjtlf\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.322182 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx5m4\" (UniqueName: \"kubernetes.io/projected/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-kube-api-access-tx5m4\") pod \"certified-operators-9s2lw\" (UID: 
\"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.322231 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-dns-svc\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.322257 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-config\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.328917 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-dns-svc\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.335514 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-config\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.350690 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.351905 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjtlf\" (UniqueName: \"kubernetes.io/projected/4c770ec3-c23e-4afe-accb-3ad3b4aded53-kube-api-access-zjtlf\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.362331 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.369848 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-7hs8x\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.376849 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.384430 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a2a3e00-c497-473a-985a-f406a6a16604" path="/var/lib/kubelet/pods/0a2a3e00-c497-473a-985a-f406a6a16604/volumes" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.425580 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-utilities\") pod \"certified-operators-9s2lw\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.435596 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-catalog-content\") pod \"certified-operators-9s2lw\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.436005 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx5m4\" (UniqueName: \"kubernetes.io/projected/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-kube-api-access-tx5m4\") pod \"certified-operators-9s2lw\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.427203 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-utilities\") pod \"certified-operators-9s2lw\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.440868 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-catalog-content\") pod \"certified-operators-9s2lw\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.463460 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx5m4\" (UniqueName: \"kubernetes.io/projected/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-kube-api-access-tx5m4\") pod \"certified-operators-9s2lw\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") " pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.528352 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.552349 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.657039 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:05 crc kubenswrapper[4943]: I1129 07:14:05.872403 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-sf2xk"] Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.001589 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerStarted","Data":"58758ddd77cadb7c1da305ab14cfde04789d1bded933ec60c2cb13841f7758ea"} Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.014248 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4qtlw"] Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.015020 4943 generic.go:334] "Generic (PLEG): container finished" podID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" containerID="16572ee100e338426e5ebf2d681db7cc64e71e138f5a43f44bc33c9c1ecbbacb" exitCode=0 Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.018051 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" event={"ID":"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6","Type":"ContainerDied","Data":"16572ee100e338426e5ebf2d681db7cc64e71e138f5a43f44bc33c9c1ecbbacb"} Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.018271 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.026611 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.026793 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.035844 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-sf2xk" event={"ID":"e88bdece-8370-4e3e-9127-a932d4452f5b","Type":"ContainerStarted","Data":"2c16c8461825c8c8776ccddd3eeb85569eb822bc0bec7b0b1fd549668b1f65c9"} Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.040951 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4qtlw"] Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.076291 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-scripts\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.076400 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.076711 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-config-data\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 
07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.076767 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6ntv\" (UniqueName: \"kubernetes.io/projected/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-kube-api-access-n6ntv\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.153391 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.179701 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-scripts\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.179775 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.179888 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-config-data\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.179924 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6ntv\" (UniqueName: \"kubernetes.io/projected/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-kube-api-access-n6ntv\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.200635 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.201336 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-config-data\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.203119 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-scripts\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.232151 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6ntv\" (UniqueName: 
\"kubernetes.io/projected/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-kube-api-access-n6ntv\") pod \"nova-cell1-conductor-db-sync-4qtlw\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.413703 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.433873 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.523858 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.594082 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-sb\") pod \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.594199 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-dns-svc\") pod \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.594260 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-config\") pod \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.594350 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bd4m5\" (UniqueName: \"kubernetes.io/projected/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-kube-api-access-bd4m5\") pod \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.594412 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-nb\") pod \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\" (UID: \"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6\") " Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.604773 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-kube-api-access-bd4m5" (OuterVolumeSpecName: "kube-api-access-bd4m5") pod "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" (UID: "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6"). InnerVolumeSpecName "kube-api-access-bd4m5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.670776 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-7hs8x"] Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.676949 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" (UID: "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.677863 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" (UID: "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.682972 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-config" (OuterVolumeSpecName: "config") pod "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" (UID: "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.698173 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.699781 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bd4m5\" (UniqueName: \"kubernetes.io/projected/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-kube-api-access-bd4m5\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.699799 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.699811 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.709354 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" (UID: "ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.788677 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.801506 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.822242 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9s2lw"] Nov 29 07:14:06 crc kubenswrapper[4943]: W1129 07:14:06.825815 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2ea1d69_a79d_45c0_8c74_c394cedf30ce.slice/crio-90cfd339e4c23e711eb24ae1a7ce924f6e333be42c74479706d09d255725cd31 WatchSource:0}: Error finding container 90cfd339e4c23e711eb24ae1a7ce924f6e333be42c74479706d09d255725cd31: Status 404 returned error can't find the container with id 90cfd339e4c23e711eb24ae1a7ce924f6e333be42c74479706d09d255725cd31 Nov 29 07:14:06 crc kubenswrapper[4943]: I1129 07:14:06.847331 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.046577 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57760419-372d-4b83-b5d3-f7f6afc6a5c7","Type":"ContainerStarted","Data":"cdc1f0d4f4f574dcf778fdf6a0c420b8893747e3c8b26f151728c6aea2e70111"} Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.056490 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6082b50d-15e7-4cd4-b20e-9e982b7c08aa","Type":"ContainerStarted","Data":"a0856af149fc448fdfcf7ce807437eab7a5e66614eae8229e132154c5fe4582f"} Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.060214 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" event={"ID":"4c770ec3-c23e-4afe-accb-3ad3b4aded53","Type":"ContainerStarted","Data":"829707d599fad6fa41964fda9a606a0c49d100b35387868de116a6c9974c0a75"} Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.061403 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2768e1a6-f9ab-4039-b6b9-f3593613e0cd","Type":"ContainerStarted","Data":"b68b7fb284305e779433a4017847eed7a3ed651ca2bb63fca47e3f6b4e1d4b73"} Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.065249 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" event={"ID":"ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6","Type":"ContainerDied","Data":"221ae234d08a773798e2eaad365a4b533dd3bd06d62b8ca2bbdd8156025982dd"} Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.065288 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-68vzd" Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.065319 4943 scope.go:117] "RemoveContainer" containerID="16572ee100e338426e5ebf2d681db7cc64e71e138f5a43f44bc33c9c1ecbbacb" Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.073837 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-sf2xk" event={"ID":"e88bdece-8370-4e3e-9127-a932d4452f5b","Type":"ContainerStarted","Data":"f31e75c0f1ca180fa5447d8b86062d81137a5c8467e221f4fcd473d25403794a"} Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.084783 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c2ea1d69-a79d-45c0-8c74-c394cedf30ce","Type":"ContainerStarted","Data":"90cfd339e4c23e711eb24ae1a7ce924f6e333be42c74479706d09d255725cd31"} Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.088161 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9s2lw" event={"ID":"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0","Type":"ContainerStarted","Data":"8811b0207564e01e605a46ce15a5b527769849216decf3b747167cee5cf19043"} Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.096032 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4qtlw"] Nov 29 07:14:07 crc kubenswrapper[4943]: W1129 07:14:07.100222 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3fda5f5_7547_4aec_b6ce_55b5b7434e86.slice/crio-544361c08710eb274354dcf2e73cbea9cba6fd7c82fc155a9c68562a648c621c WatchSource:0}: Error finding container 544361c08710eb274354dcf2e73cbea9cba6fd7c82fc155a9c68562a648c621c: Status 404 returned error can't find the container with id 544361c08710eb274354dcf2e73cbea9cba6fd7c82fc155a9c68562a648c621c Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.102707 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-sf2xk" podStartSLOduration=3.102687737 podStartE2EDuration="3.102687737s" podCreationTimestamp="2025-11-29 07:14:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:07.092842125 +0000 UTC m=+2422.022930878" watchObservedRunningTime="2025-11-29 07:14:07.102687737 +0000 UTC m=+2422.032776490" Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.115289 4943 scope.go:117] "RemoveContainer" containerID="3a3e83868e2c960e2a4944de6d844be230c6fef614ff2782108accea3670e54b" Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.155116 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-68vzd"] Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.163767 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-68vzd"] Nov 29 07:14:07 crc kubenswrapper[4943]: I1129 07:14:07.343259 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" path="/var/lib/kubelet/pods/ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6/volumes" Nov 29 07:14:08 crc kubenswrapper[4943]: I1129 07:14:08.115053 4943 generic.go:334] "Generic (PLEG): container finished" podID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" containerID="9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c" exitCode=0 Nov 29 07:14:08 crc kubenswrapper[4943]: I1129 
07:14:08.116016 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" event={"ID":"4c770ec3-c23e-4afe-accb-3ad3b4aded53","Type":"ContainerDied","Data":"9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c"} Nov 29 07:14:08 crc kubenswrapper[4943]: I1129 07:14:08.146548 4943 generic.go:334] "Generic (PLEG): container finished" podID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerID="565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d" exitCode=0 Nov 29 07:14:08 crc kubenswrapper[4943]: I1129 07:14:08.146631 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9s2lw" event={"ID":"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0","Type":"ContainerDied","Data":"565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d"} Nov 29 07:14:08 crc kubenswrapper[4943]: I1129 07:14:08.163050 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerStarted","Data":"164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00"} Nov 29 07:14:08 crc kubenswrapper[4943]: I1129 07:14:08.176625 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" event={"ID":"b3fda5f5-7547-4aec-b6ce-55b5b7434e86","Type":"ContainerStarted","Data":"cd47a9ba2912637f0e86177785d721d55cb1fe91a746f124c3b58cb6eab8b3bc"} Nov 29 07:14:08 crc kubenswrapper[4943]: I1129 07:14:08.176684 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" event={"ID":"b3fda5f5-7547-4aec-b6ce-55b5b7434e86","Type":"ContainerStarted","Data":"544361c08710eb274354dcf2e73cbea9cba6fd7c82fc155a9c68562a648c621c"} Nov 29 07:14:08 crc kubenswrapper[4943]: I1129 07:14:08.293674 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" podStartSLOduration=3.293656039 podStartE2EDuration="3.293656039s" podCreationTimestamp="2025-11-29 07:14:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:08.244922707 +0000 UTC m=+2423.175011460" watchObservedRunningTime="2025-11-29 07:14:08.293656039 +0000 UTC m=+2423.223744792" Nov 29 07:14:09 crc kubenswrapper[4943]: I1129 07:14:09.148262 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:09 crc kubenswrapper[4943]: I1129 07:14:09.178793 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:09 crc kubenswrapper[4943]: I1129 07:14:09.199830 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" event={"ID":"4c770ec3-c23e-4afe-accb-3ad3b4aded53","Type":"ContainerStarted","Data":"902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7"} Nov 29 07:14:09 crc kubenswrapper[4943]: I1129 07:14:09.203110 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9s2lw" event={"ID":"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0","Type":"ContainerStarted","Data":"5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846"} Nov 29 07:14:09 crc kubenswrapper[4943]: I1129 07:14:09.206308 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerStarted","Data":"f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024"} Nov 29 07:14:09 crc kubenswrapper[4943]: I1129 07:14:09.255286 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" podStartSLOduration=4.255256953 podStartE2EDuration="4.255256953s" podCreationTimestamp="2025-11-29 07:14:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:09.228085563 +0000 UTC m=+2424.158174316" watchObservedRunningTime="2025-11-29 07:14:09.255256953 +0000 UTC m=+2424.185345706" Nov 29 07:14:10 crc kubenswrapper[4943]: I1129 07:14:10.216837 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:11 crc kubenswrapper[4943]: I1129 07:14:11.225205 4943 generic.go:334] "Generic (PLEG): container finished" podID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerID="5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846" exitCode=0 Nov 29 07:14:11 crc kubenswrapper[4943]: I1129 07:14:11.226190 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9s2lw" event={"ID":"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0","Type":"ContainerDied","Data":"5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846"} Nov 29 07:14:11 crc kubenswrapper[4943]: I1129 07:14:11.358068 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:14:11 crc kubenswrapper[4943]: I1129 07:14:11.358705 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="d34120b5-7d6c-4fc5-a431-45bcfac00231" containerName="nova-cell0-conductor-conductor" containerID="cri-o://4fc53b76db4c87e7a48a703ab7e0aaa7c17895117a9d229a83d1c92eb70d969c" gracePeriod=30 Nov 29 07:14:11 crc kubenswrapper[4943]: I1129 07:14:11.366497 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:11 crc kubenswrapper[4943]: I1129 07:14:11.410411 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:12 crc kubenswrapper[4943]: I1129 07:14:12.241157 4943 generic.go:334] "Generic (PLEG): container finished" podID="d34120b5-7d6c-4fc5-a431-45bcfac00231" containerID="4fc53b76db4c87e7a48a703ab7e0aaa7c17895117a9d229a83d1c92eb70d969c" exitCode=0 Nov 29 07:14:12 crc kubenswrapper[4943]: I1129 07:14:12.241213 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d34120b5-7d6c-4fc5-a431-45bcfac00231","Type":"ContainerDied","Data":"4fc53b76db4c87e7a48a703ab7e0aaa7c17895117a9d229a83d1c92eb70d969c"} Nov 29 07:14:12 crc kubenswrapper[4943]: I1129 07:14:12.312504 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:14:12 crc kubenswrapper[4943]: I1129 07:14:12.364548 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hqj99" Nov 29 07:14:12 crc kubenswrapper[4943]: I1129 07:14:12.886198 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.270692 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerStarted","Data":"6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df"} Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.341639 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.382022 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hqj99"] Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.400148 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-combined-ca-bundle\") pod \"d34120b5-7d6c-4fc5-a431-45bcfac00231\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.400349 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-config-data\") pod \"d34120b5-7d6c-4fc5-a431-45bcfac00231\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.400504 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhc7w\" (UniqueName: \"kubernetes.io/projected/d34120b5-7d6c-4fc5-a431-45bcfac00231-kube-api-access-rhc7w\") pod \"d34120b5-7d6c-4fc5-a431-45bcfac00231\" (UID: \"d34120b5-7d6c-4fc5-a431-45bcfac00231\") " Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.437392 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d34120b5-7d6c-4fc5-a431-45bcfac00231-kube-api-access-rhc7w" (OuterVolumeSpecName: "kube-api-access-rhc7w") pod "d34120b5-7d6c-4fc5-a431-45bcfac00231" (UID: "d34120b5-7d6c-4fc5-a431-45bcfac00231"). InnerVolumeSpecName "kube-api-access-rhc7w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.495925 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d34120b5-7d6c-4fc5-a431-45bcfac00231" (UID: "d34120b5-7d6c-4fc5-a431-45bcfac00231"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.496733 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-config-data" (OuterVolumeSpecName: "config-data") pod "d34120b5-7d6c-4fc5-a431-45bcfac00231" (UID: "d34120b5-7d6c-4fc5-a431-45bcfac00231"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.502910 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhc7w\" (UniqueName: \"kubernetes.io/projected/d34120b5-7d6c-4fc5-a431-45bcfac00231-kube-api-access-rhc7w\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.503074 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.503164 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d34120b5-7d6c-4fc5-a431-45bcfac00231-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.549193 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mhsb2"] Nov 29 07:14:13 crc kubenswrapper[4943]: I1129 07:14:13.550267 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mhsb2" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="registry-server" containerID="cri-o://1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3" gracePeriod=2 Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.278851 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6082b50d-15e7-4cd4-b20e-9e982b7c08aa","Type":"ContainerStarted","Data":"a8705fbebd45121b8ff43f0350b58ef1afe32c6041f75bf8f42c4d9fc8ec5b75"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.280931 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6082b50d-15e7-4cd4-b20e-9e982b7c08aa","Type":"ContainerStarted","Data":"9579224b72910900fee377ca3da9e56348db08187819fc76a9e8755fba0a2f26"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.281025 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d34120b5-7d6c-4fc5-a431-45bcfac00231","Type":"ContainerDied","Data":"4d269cc661492267c954ff217cde78e3074497c3502e3df5d5eaa77dd0ef7307"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.281121 4943 scope.go:117] "RemoveContainer" containerID="4fc53b76db4c87e7a48a703ab7e0aaa7c17895117a9d229a83d1c92eb70d969c" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.279038 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerName="nova-api-log" containerID="cri-o://9579224b72910900fee377ca3da9e56348db08187819fc76a9e8755fba0a2f26" gracePeriod=30 Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.279111 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerName="nova-api-api" containerID="cri-o://a8705fbebd45121b8ff43f0350b58ef1afe32c6041f75bf8f42c4d9fc8ec5b75" gracePeriod=30 Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.280378 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.284770 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2768e1a6-f9ab-4039-b6b9-f3593613e0cd","Type":"ContainerStarted","Data":"31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.285355 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="2768e1a6-f9ab-4039-b6b9-f3593613e0cd" containerName="nova-scheduler-scheduler" containerID="cri-o://31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9" gracePeriod=30 Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.298583 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c2ea1d69-a79d-45c0-8c74-c394cedf30ce","Type":"ContainerStarted","Data":"9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.298711 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="c2ea1d69-a79d-45c0-8c74-c394cedf30ce" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2" gracePeriod=30 Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.314753 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.958217234 podStartE2EDuration="10.314709204s" podCreationTimestamp="2025-11-29 07:14:04 +0000 UTC" firstStartedPulling="2025-11-29 07:14:06.570378904 +0000 UTC m=+2421.500467657" lastFinishedPulling="2025-11-29 07:14:12.926870874 +0000 UTC m=+2427.856959627" observedRunningTime="2025-11-29 07:14:14.307331702 +0000 UTC m=+2429.237420445" watchObservedRunningTime="2025-11-29 07:14:14.314709204 +0000 UTC m=+2429.244797957" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.318711 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9s2lw" event={"ID":"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0","Type":"ContainerStarted","Data":"281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.322817 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mhsb2" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.322951 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57760419-372d-4b83-b5d3-f7f6afc6a5c7","Type":"ContainerStarted","Data":"36ec77c1654c36e247f99bb747bc99facc852ff38dd9f6158c042ae4e3b4ab4f"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.322987 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57760419-372d-4b83-b5d3-f7f6afc6a5c7","Type":"ContainerStarted","Data":"14925d34d9411cc36a3c56e8e38a863b3283ac1d09b3f502f698e0a7cbe12a1e"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.323079 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerName="nova-metadata-log" containerID="cri-o://14925d34d9411cc36a3c56e8e38a863b3283ac1d09b3f502f698e0a7cbe12a1e" gracePeriod=30 Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.323168 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerName="nova-metadata-metadata" containerID="cri-o://36ec77c1654c36e247f99bb747bc99facc852ff38dd9f6158c042ae4e3b4ab4f" gracePeriod=30 Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.328734 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.230894532 podStartE2EDuration="9.32871298s" podCreationTimestamp="2025-11-29 07:14:05 +0000 UTC" firstStartedPulling="2025-11-29 07:14:06.830640735 +0000 UTC m=+2421.760729478" lastFinishedPulling="2025-11-29 07:14:12.928459173 +0000 UTC m=+2427.858547926" observedRunningTime="2025-11-29 07:14:14.325956522 +0000 UTC m=+2429.256045265" watchObservedRunningTime="2025-11-29 07:14:14.32871298 +0000 UTC m=+2429.258801733" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.329713 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.330087 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.341852 4943 generic.go:334] "Generic (PLEG): container finished" podID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerID="1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3" exitCode=0 Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.342403 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mhsb2" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.343314 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhsb2" event={"ID":"6721f85f-7d2a-4877-ae05-b2bc850bd10e","Type":"ContainerDied","Data":"1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.343349 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mhsb2" event={"ID":"6721f85f-7d2a-4877-ae05-b2bc850bd10e","Type":"ContainerDied","Data":"ecc22e9e4c02bdb664c3d82f0cf78c08e13fbdb8d943534247a9727f49c7c95a"} Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.343390 4943 scope.go:117] "RemoveContainer" containerID="1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.355306 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=4.251693774 podStartE2EDuration="10.355282485s" podCreationTimestamp="2025-11-29 07:14:04 +0000 UTC" firstStartedPulling="2025-11-29 07:14:06.803347912 +0000 UTC m=+2421.733436665" lastFinishedPulling="2025-11-29 07:14:12.906936623 +0000 UTC m=+2427.837025376" observedRunningTime="2025-11-29 07:14:14.349943463 +0000 UTC m=+2429.280032216" watchObservedRunningTime="2025-11-29 07:14:14.355282485 +0000 UTC m=+2429.285371238" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.369586 4943 scope.go:117] "RemoveContainer" containerID="38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.453423 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.764107616 podStartE2EDuration="10.453404286s" podCreationTimestamp="2025-11-29 07:14:04 +0000 UTC" firstStartedPulling="2025-11-29 07:14:06.238698922 +0000 UTC m=+2421.168787675" lastFinishedPulling="2025-11-29 07:14:12.927995592 +0000 UTC m=+2427.858084345" observedRunningTime="2025-11-29 07:14:14.443709237 +0000 UTC m=+2429.373797990" watchObservedRunningTime="2025-11-29 07:14:14.453404286 +0000 UTC m=+2429.383493039" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.462051 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-catalog-content\") pod \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.462089 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-utilities\") pod \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.462216 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngbs6\" (UniqueName: \"kubernetes.io/projected/6721f85f-7d2a-4877-ae05-b2bc850bd10e-kube-api-access-ngbs6\") pod \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\" (UID: \"6721f85f-7d2a-4877-ae05-b2bc850bd10e\") " Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.463860 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-utilities" (OuterVolumeSpecName: "utilities") pod "6721f85f-7d2a-4877-ae05-b2bc850bd10e" (UID: "6721f85f-7d2a-4877-ae05-b2bc850bd10e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.474728 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6721f85f-7d2a-4877-ae05-b2bc850bd10e-kube-api-access-ngbs6" (OuterVolumeSpecName: "kube-api-access-ngbs6") pod "6721f85f-7d2a-4877-ae05-b2bc850bd10e" (UID: "6721f85f-7d2a-4877-ae05-b2bc850bd10e"). InnerVolumeSpecName "kube-api-access-ngbs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.504290 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9s2lw" podStartSLOduration=4.727761211 podStartE2EDuration="9.504271441s" podCreationTimestamp="2025-11-29 07:14:05 +0000 UTC" firstStartedPulling="2025-11-29 07:14:08.155614824 +0000 UTC m=+2423.085703577" lastFinishedPulling="2025-11-29 07:14:12.932125054 +0000 UTC m=+2427.862213807" observedRunningTime="2025-11-29 07:14:14.499879692 +0000 UTC m=+2429.429968445" watchObservedRunningTime="2025-11-29 07:14:14.504271441 +0000 UTC m=+2429.434360194" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.534253 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.554359 4943 scope.go:117] "RemoveContainer" containerID="069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.566107 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.566140 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngbs6\" (UniqueName: \"kubernetes.io/projected/6721f85f-7d2a-4877-ae05-b2bc850bd10e-kube-api-access-ngbs6\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.588724 4943 scope.go:117] "RemoveContainer" containerID="1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3" Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.593727 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3\": container with ID starting with 1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3 not found: ID does not exist" containerID="1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.593777 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3"} err="failed to get container status \"1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3\": rpc error: code = NotFound desc = could not find container \"1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3\": container with ID starting with 1efe355270a7e65619c0e3783afd28f6a8da70f41dc1f60b19ab43265d99c8c3 not found: ID does not exist" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 
07:14:14.593802 4943 scope.go:117] "RemoveContainer" containerID="38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.597654 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.599154 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec\": container with ID starting with 38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec not found: ID does not exist" containerID="38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.599190 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec"} err="failed to get container status \"38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec\": rpc error: code = NotFound desc = could not find container \"38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec\": container with ID starting with 38a6b68d2b91412ba4a86996144a10a83ad5404e026628892392f5a75b2b22ec not found: ID does not exist" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.599215 4943 scope.go:117] "RemoveContainer" containerID="069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736" Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.605703 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736\": container with ID starting with 069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736 not found: ID does not exist" containerID="069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.605754 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736"} err="failed to get container status \"069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736\": rpc error: code = NotFound desc = could not find container \"069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736\": container with ID starting with 069436a340ed74c4bbe1e1781e9ab409cd0c2fb97fba3b7d2ae5d32b14da1736 not found: ID does not exist" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.621634 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.622076 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" containerName="dnsmasq-dns" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622096 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" containerName="dnsmasq-dns" Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.622113 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="extract-utilities" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622119 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="extract-utilities" Nov 29 07:14:14 crc 
kubenswrapper[4943]: E1129 07:14:14.622130 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="extract-content" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622137 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="extract-content" Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.622145 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="registry-server" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622152 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="registry-server" Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.622169 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" containerName="init" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622175 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" containerName="init" Nov 29 07:14:14 crc kubenswrapper[4943]: E1129 07:14:14.622186 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d34120b5-7d6c-4fc5-a431-45bcfac00231" containerName="nova-cell0-conductor-conductor" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622192 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d34120b5-7d6c-4fc5-a431-45bcfac00231" containerName="nova-cell0-conductor-conductor" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622358 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d34120b5-7d6c-4fc5-a431-45bcfac00231" containerName="nova-cell0-conductor-conductor" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622373 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" containerName="registry-server" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622395 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea676f5f-c3c7-4fd3-9e4c-3113ab363ca6" containerName="dnsmasq-dns" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.622957 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6721f85f-7d2a-4877-ae05-b2bc850bd10e" (UID: "6721f85f-7d2a-4877-ae05-b2bc850bd10e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.623012 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.628899 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.638907 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.667521 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6721f85f-7d2a-4877-ae05-b2bc850bd10e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.696826 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mhsb2"] Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.710076 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mhsb2"] Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.773336 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e86cce9-f739-49de-8b00-6ad1ae54d725-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.773488 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8l2f\" (UniqueName: \"kubernetes.io/projected/3e86cce9-f739-49de-8b00-6ad1ae54d725-kube-api-access-c8l2f\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.773609 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e86cce9-f739-49de-8b00-6ad1ae54d725-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.874998 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e86cce9-f739-49de-8b00-6ad1ae54d725-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.875100 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8l2f\" (UniqueName: \"kubernetes.io/projected/3e86cce9-f739-49de-8b00-6ad1ae54d725-kube-api-access-c8l2f\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.875147 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e86cce9-f739-49de-8b00-6ad1ae54d725-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.880193 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/3e86cce9-f739-49de-8b00-6ad1ae54d725-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.882458 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e86cce9-f739-49de-8b00-6ad1ae54d725-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.898114 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8l2f\" (UniqueName: \"kubernetes.io/projected/3e86cce9-f739-49de-8b00-6ad1ae54d725-kube-api-access-c8l2f\") pod \"nova-cell0-conductor-0\" (UID: \"3e86cce9-f739-49de-8b00-6ad1ae54d725\") " pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:14 crc kubenswrapper[4943]: I1129 07:14:14.955015 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.165362 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.165414 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.304020 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.355930 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6721f85f-7d2a-4877-ae05-b2bc850bd10e" path="/var/lib/kubelet/pods/6721f85f-7d2a-4877-ae05-b2bc850bd10e/volumes" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.357514 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d34120b5-7d6c-4fc5-a431-45bcfac00231" path="/var/lib/kubelet/pods/d34120b5-7d6c-4fc5-a431-45bcfac00231/volumes" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.405746 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.478203 4943 generic.go:334] "Generic (PLEG): container finished" podID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerID="36ec77c1654c36e247f99bb747bc99facc852ff38dd9f6158c042ae4e3b4ab4f" exitCode=0 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.478237 4943 generic.go:334] "Generic (PLEG): container finished" podID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerID="14925d34d9411cc36a3c56e8e38a863b3283ac1d09b3f502f698e0a7cbe12a1e" exitCode=143 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.478298 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57760419-372d-4b83-b5d3-f7f6afc6a5c7","Type":"ContainerDied","Data":"36ec77c1654c36e247f99bb747bc99facc852ff38dd9f6158c042ae4e3b4ab4f"} Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.478322 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57760419-372d-4b83-b5d3-f7f6afc6a5c7","Type":"ContainerDied","Data":"14925d34d9411cc36a3c56e8e38a863b3283ac1d09b3f502f698e0a7cbe12a1e"} Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.505818 4943 generic.go:334] "Generic (PLEG): container finished" 
podID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerID="a8705fbebd45121b8ff43f0350b58ef1afe32c6041f75bf8f42c4d9fc8ec5b75" exitCode=0 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.506117 4943 generic.go:334] "Generic (PLEG): container finished" podID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerID="9579224b72910900fee377ca3da9e56348db08187819fc76a9e8755fba0a2f26" exitCode=143 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.505999 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6082b50d-15e7-4cd4-b20e-9e982b7c08aa","Type":"ContainerDied","Data":"a8705fbebd45121b8ff43f0350b58ef1afe32c6041f75bf8f42c4d9fc8ec5b75"} Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.506179 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6082b50d-15e7-4cd4-b20e-9e982b7c08aa","Type":"ContainerDied","Data":"9579224b72910900fee377ca3da9e56348db08187819fc76a9e8755fba0a2f26"} Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.541221 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b76cdf485-lmsb6"] Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.541456 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" podUID="49915ae8-7a1f-446e-804d-299765490b05" containerName="dnsmasq-dns" containerID="cri-o://9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d" gracePeriod=10 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.553330 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.561743 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="ceilometer-central-agent" containerID="cri-o://164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00" gracePeriod=30 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.561869 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.562111 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="proxy-httpd" containerID="cri-o://a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c" gracePeriod=30 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.562185 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="ceilometer-notification-agent" containerID="cri-o://f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024" gracePeriod=30 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.562203 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="sg-core" containerID="cri-o://6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df" gracePeriod=30 Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.562370 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.607240 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-config-data\") pod \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.607324 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57760419-372d-4b83-b5d3-f7f6afc6a5c7-logs\") pod \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.607443 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4qjv\" (UniqueName: \"kubernetes.io/projected/57760419-372d-4b83-b5d3-f7f6afc6a5c7-kube-api-access-q4qjv\") pod \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.607461 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-combined-ca-bundle\") pod \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\" (UID: \"57760419-372d-4b83-b5d3-f7f6afc6a5c7\") " Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.625805 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57760419-372d-4b83-b5d3-f7f6afc6a5c7-logs" (OuterVolumeSpecName: "logs") pod "57760419-372d-4b83-b5d3-f7f6afc6a5c7" (UID: "57760419-372d-4b83-b5d3-f7f6afc6a5c7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.628861 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57760419-372d-4b83-b5d3-f7f6afc6a5c7-kube-api-access-q4qjv" (OuterVolumeSpecName: "kube-api-access-q4qjv") pod "57760419-372d-4b83-b5d3-f7f6afc6a5c7" (UID: "57760419-372d-4b83-b5d3-f7f6afc6a5c7"). InnerVolumeSpecName "kube-api-access-q4qjv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.660326 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.660394 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.677020 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.43363119 podStartE2EDuration="11.676998522s" podCreationTimestamp="2025-11-29 07:14:04 +0000 UTC" firstStartedPulling="2025-11-29 07:14:05.55242608 +0000 UTC m=+2420.482514833" lastFinishedPulling="2025-11-29 07:14:14.795793412 +0000 UTC m=+2429.725882165" observedRunningTime="2025-11-29 07:14:15.62905127 +0000 UTC m=+2430.559140033" watchObservedRunningTime="2025-11-29 07:14:15.676998522 +0000 UTC m=+2430.607087275" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.717888 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4qjv\" (UniqueName: \"kubernetes.io/projected/57760419-372d-4b83-b5d3-f7f6afc6a5c7-kube-api-access-q4qjv\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.717931 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57760419-372d-4b83-b5d3-f7f6afc6a5c7-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.724605 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-config-data" (OuterVolumeSpecName: "config-data") pod "57760419-372d-4b83-b5d3-f7f6afc6a5c7" (UID: "57760419-372d-4b83-b5d3-f7f6afc6a5c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.725717 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57760419-372d-4b83-b5d3-f7f6afc6a5c7" (UID: "57760419-372d-4b83-b5d3-f7f6afc6a5c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.806666 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.827132 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.827170 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57760419-372d-4b83-b5d3-f7f6afc6a5c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.928054 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-combined-ca-bundle\") pod \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.928536 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm4js\" (UniqueName: \"kubernetes.io/projected/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-kube-api-access-wm4js\") pod \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.928665 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-config-data\") pod \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.928690 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-logs\") pod \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\" (UID: \"6082b50d-15e7-4cd4-b20e-9e982b7c08aa\") " Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.929788 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-logs" (OuterVolumeSpecName: "logs") pod "6082b50d-15e7-4cd4-b20e-9e982b7c08aa" (UID: "6082b50d-15e7-4cd4-b20e-9e982b7c08aa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.941728 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-kube-api-access-wm4js" (OuterVolumeSpecName: "kube-api-access-wm4js") pod "6082b50d-15e7-4cd4-b20e-9e982b7c08aa" (UID: "6082b50d-15e7-4cd4-b20e-9e982b7c08aa"). InnerVolumeSpecName "kube-api-access-wm4js". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:15 crc kubenswrapper[4943]: I1129 07:14:15.965178 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-config-data" (OuterVolumeSpecName: "config-data") pod "6082b50d-15e7-4cd4-b20e-9e982b7c08aa" (UID: "6082b50d-15e7-4cd4-b20e-9e982b7c08aa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:15.997552 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6082b50d-15e7-4cd4-b20e-9e982b7c08aa" (UID: "6082b50d-15e7-4cd4-b20e-9e982b7c08aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.032736 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.032777 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.032786 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.032798 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm4js\" (UniqueName: \"kubernetes.io/projected/6082b50d-15e7-4cd4-b20e-9e982b7c08aa-kube-api-access-wm4js\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.058912 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.222678 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.249147 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-nb\") pod \"49915ae8-7a1f-446e-804d-299765490b05\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.249188 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-config\") pod \"49915ae8-7a1f-446e-804d-299765490b05\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.249205 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-dns-svc\") pod \"49915ae8-7a1f-446e-804d-299765490b05\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.249298 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7l2kv\" (UniqueName: \"kubernetes.io/projected/49915ae8-7a1f-446e-804d-299765490b05-kube-api-access-7l2kv\") pod \"49915ae8-7a1f-446e-804d-299765490b05\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.249323 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-sb\") pod \"49915ae8-7a1f-446e-804d-299765490b05\" (UID: \"49915ae8-7a1f-446e-804d-299765490b05\") " Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.257768 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49915ae8-7a1f-446e-804d-299765490b05-kube-api-access-7l2kv" (OuterVolumeSpecName: "kube-api-access-7l2kv") pod "49915ae8-7a1f-446e-804d-299765490b05" (UID: "49915ae8-7a1f-446e-804d-299765490b05"). InnerVolumeSpecName "kube-api-access-7l2kv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.355640 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7l2kv\" (UniqueName: \"kubernetes.io/projected/49915ae8-7a1f-446e-804d-299765490b05-kube-api-access-7l2kv\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.357556 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "49915ae8-7a1f-446e-804d-299765490b05" (UID: "49915ae8-7a1f-446e-804d-299765490b05"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.360019 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "49915ae8-7a1f-446e-804d-299765490b05" (UID: "49915ae8-7a1f-446e-804d-299765490b05"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.411138 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "49915ae8-7a1f-446e-804d-299765490b05" (UID: "49915ae8-7a1f-446e-804d-299765490b05"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.445169 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-config" (OuterVolumeSpecName: "config") pod "49915ae8-7a1f-446e-804d-299765490b05" (UID: "49915ae8-7a1f-446e-804d-299765490b05"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.459150 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.459193 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.459209 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.459223 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49915ae8-7a1f-446e-804d-299765490b05-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.574657 4943 generic.go:334] "Generic (PLEG): container finished" podID="73439687-a401-401c-bbbf-48de7fed3a51" containerID="6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df" exitCode=2 Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.574690 4943 generic.go:334] "Generic (PLEG): container finished" podID="73439687-a401-401c-bbbf-48de7fed3a51" containerID="f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024" exitCode=0 Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.574697 4943 generic.go:334] "Generic (PLEG): container finished" podID="73439687-a401-401c-bbbf-48de7fed3a51" containerID="164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00" exitCode=0 Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.574757 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerStarted","Data":"a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.574783 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerDied","Data":"6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.574793 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerDied","Data":"f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.574830 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerDied","Data":"164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.589724 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"57760419-372d-4b83-b5d3-f7f6afc6a5c7","Type":"ContainerDied","Data":"cdc1f0d4f4f574dcf778fdf6a0c420b8893747e3c8b26f151728c6aea2e70111"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.589788 4943 scope.go:117] "RemoveContainer" containerID="36ec77c1654c36e247f99bb747bc99facc852ff38dd9f6158c042ae4e3b4ab4f" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.589961 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.610328 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6082b50d-15e7-4cd4-b20e-9e982b7c08aa","Type":"ContainerDied","Data":"a0856af149fc448fdfcf7ce807437eab7a5e66614eae8229e132154c5fe4582f"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.610357 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.617601 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3e86cce9-f739-49de-8b00-6ad1ae54d725","Type":"ContainerStarted","Data":"7ea6c2dde594e49a5ada5e724f9da937369f4696a45d71750ca4a77dc1a6a1f9"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.617644 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3e86cce9-f739-49de-8b00-6ad1ae54d725","Type":"ContainerStarted","Data":"8c4e2d422f9ad9fb516a8e57a0f2307c59c1269c6bf2e575bb021a00afa9e704"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.618744 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.630908 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.631071 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" event={"ID":"49915ae8-7a1f-446e-804d-299765490b05","Type":"ContainerDied","Data":"9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.631249 4943 generic.go:334] "Generic (PLEG): container finished" podID="49915ae8-7a1f-446e-804d-299765490b05" containerID="9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d" exitCode=0 Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.631388 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b76cdf485-lmsb6" event={"ID":"49915ae8-7a1f-446e-804d-299765490b05","Type":"ContainerDied","Data":"6b6a3f3f63ea94a0dc9b52c2b69accdcb355efe91e7f579fd154a3495487092d"} Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.639349 4943 scope.go:117] "RemoveContainer" containerID="14925d34d9411cc36a3c56e8e38a863b3283ac1d09b3f502f698e0a7cbe12a1e" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.639787 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.668453 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.683675 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: E1129 07:14:16.684491 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerName="nova-api-log" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.684521 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerName="nova-api-log" Nov 29 07:14:16 crc kubenswrapper[4943]: E1129 07:14:16.694605 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49915ae8-7a1f-446e-804d-299765490b05" containerName="dnsmasq-dns" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.694721 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="49915ae8-7a1f-446e-804d-299765490b05" containerName="dnsmasq-dns" Nov 29 07:14:16 crc kubenswrapper[4943]: E1129 07:14:16.694859 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerName="nova-metadata-log" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.694872 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerName="nova-metadata-log" Nov 29 07:14:16 crc kubenswrapper[4943]: E1129 07:14:16.694899 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerName="nova-api-api" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.694911 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerName="nova-api-api" Nov 29 07:14:16 crc kubenswrapper[4943]: E1129 07:14:16.694956 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49915ae8-7a1f-446e-804d-299765490b05" containerName="init" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.694967 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="49915ae8-7a1f-446e-804d-299765490b05" containerName="init" Nov 29 07:14:16 crc 
kubenswrapper[4943]: E1129 07:14:16.694995 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerName="nova-metadata-metadata" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.695009 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerName="nova-metadata-metadata" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.695609 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="49915ae8-7a1f-446e-804d-299765490b05" containerName="dnsmasq-dns" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.695632 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerName="nova-metadata-log" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.695663 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerName="nova-api-api" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.695680 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" containerName="nova-api-log" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.695695 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" containerName="nova-metadata-metadata" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.697679 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.700736 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.701326 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.708503 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.70847254 podStartE2EDuration="2.70847254s" podCreationTimestamp="2025-11-29 07:14:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:16.667297284 +0000 UTC m=+2431.597386037" watchObservedRunningTime="2025-11-29 07:14:16.70847254 +0000 UTC m=+2431.638561293" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.725981 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.738988 4943 scope.go:117] "RemoveContainer" containerID="a8705fbebd45121b8ff43f0350b58ef1afe32c6041f75bf8f42c4d9fc8ec5b75" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.752799 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.753486 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-9s2lw" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="registry-server" probeResult="failure" output=< Nov 29 07:14:16 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 07:14:16 crc kubenswrapper[4943]: > Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.768925 4943 scope.go:117] "RemoveContainer" 
containerID="9579224b72910900fee377ca3da9e56348db08187819fc76a9e8755fba0a2f26" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.789337 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.804930 4943 scope.go:117] "RemoveContainer" containerID="9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.810117 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b76cdf485-lmsb6"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.841595 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b76cdf485-lmsb6"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.853795 4943 scope.go:117] "RemoveContainer" containerID="5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.873734 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.874179 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhwm9\" (UniqueName: \"kubernetes.io/projected/a54099e5-ebae-440e-8b2d-1f1b97464fb7-kube-api-access-mhwm9\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.874267 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a54099e5-ebae-440e-8b2d-1f1b97464fb7-logs\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.874293 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.874327 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-config-data\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.878765 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.881367 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.887272 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.919648 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.976907 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcpdx\" (UniqueName: \"kubernetes.io/projected/22dac217-35ea-4303-8f79-785445fbb0cd-kube-api-access-tcpdx\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.976991 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.977034 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22dac217-35ea-4303-8f79-785445fbb0cd-logs\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.977075 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.977095 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-config-data\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.977151 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhwm9\" (UniqueName: \"kubernetes.io/projected/a54099e5-ebae-440e-8b2d-1f1b97464fb7-kube-api-access-mhwm9\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.977230 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a54099e5-ebae-440e-8b2d-1f1b97464fb7-logs\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.977250 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.977279 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-config-data\") 
pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.979037 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a54099e5-ebae-440e-8b2d-1f1b97464fb7-logs\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.979141 4943 scope.go:117] "RemoveContainer" containerID="9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.985515 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-config-data\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: E1129 07:14:16.985659 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d\": container with ID starting with 9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d not found: ID does not exist" containerID="9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.985728 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d"} err="failed to get container status \"9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d\": rpc error: code = NotFound desc = could not find container \"9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d\": container with ID starting with 9df2673ffd18862915600353662f8a630c2b9593ded8645fd8a8c333180b2b9d not found: ID does not exist" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.985791 4943 scope.go:117] "RemoveContainer" containerID="5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.987910 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:16 crc kubenswrapper[4943]: E1129 07:14:16.988087 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4\": container with ID starting with 5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4 not found: ID does not exist" containerID="5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.988148 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4"} err="failed to get container status \"5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4\": rpc error: code = NotFound desc = could not find container \"5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4\": container with ID starting with 
5f78ea5a116b7e5447fa9aae24bcda8c40a7dd44caf723f336f75349fbca61c4 not found: ID does not exist" Nov 29 07:14:16 crc kubenswrapper[4943]: I1129 07:14:16.988878 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.007856 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhwm9\" (UniqueName: \"kubernetes.io/projected/a54099e5-ebae-440e-8b2d-1f1b97464fb7-kube-api-access-mhwm9\") pod \"nova-metadata-0\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " pod="openstack/nova-metadata-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.049405 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.078477 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcpdx\" (UniqueName: \"kubernetes.io/projected/22dac217-35ea-4303-8f79-785445fbb0cd-kube-api-access-tcpdx\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.078597 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22dac217-35ea-4303-8f79-785445fbb0cd-logs\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.078645 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.078665 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-config-data\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.079820 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22dac217-35ea-4303-8f79-785445fbb0cd-logs\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.083111 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-config-data\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.087669 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.107256 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tcpdx\" (UniqueName: \"kubernetes.io/projected/22dac217-35ea-4303-8f79-785445fbb0cd-kube-api-access-tcpdx\") pod \"nova-api-0\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.127067 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.356075 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49915ae8-7a1f-446e-804d-299765490b05" path="/var/lib/kubelet/pods/49915ae8-7a1f-446e-804d-299765490b05/volumes" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.357419 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57760419-372d-4b83-b5d3-f7f6afc6a5c7" path="/var/lib/kubelet/pods/57760419-372d-4b83-b5d3-f7f6afc6a5c7/volumes" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.366346 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6082b50d-15e7-4cd4-b20e-9e982b7c08aa" path="/var/lib/kubelet/pods/6082b50d-15e7-4cd4-b20e-9e982b7c08aa/volumes" Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.699170 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:17 crc kubenswrapper[4943]: I1129 07:14:17.815763 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:17 crc kubenswrapper[4943]: W1129 07:14:17.819089 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22dac217_35ea_4303_8f79_785445fbb0cd.slice/crio-fca8c7754fe0fbb89066e63c3d5c1074a8916bd28309ada1efe738d1c83a33b8 WatchSource:0}: Error finding container fca8c7754fe0fbb89066e63c3d5c1074a8916bd28309ada1efe738d1c83a33b8: Status 404 returned error can't find the container with id fca8c7754fe0fbb89066e63c3d5c1074a8916bd28309ada1efe738d1c83a33b8 Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.658779 4943 generic.go:334] "Generic (PLEG): container finished" podID="e88bdece-8370-4e3e-9127-a932d4452f5b" containerID="f31e75c0f1ca180fa5447d8b86062d81137a5c8467e221f4fcd473d25403794a" exitCode=0 Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.659088 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-sf2xk" event={"ID":"e88bdece-8370-4e3e-9127-a932d4452f5b","Type":"ContainerDied","Data":"f31e75c0f1ca180fa5447d8b86062d81137a5c8467e221f4fcd473d25403794a"} Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.662721 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a54099e5-ebae-440e-8b2d-1f1b97464fb7","Type":"ContainerStarted","Data":"4877e4985b0e11a7f3957ee40d34e3adcd8ba6ee8f6f765d7d0b4d0e85144085"} Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.662766 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a54099e5-ebae-440e-8b2d-1f1b97464fb7","Type":"ContainerStarted","Data":"1fad52e7f2875946b4e39377d80beb50276c171b7ea4acf167152c4be41a02e4"} Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.662777 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a54099e5-ebae-440e-8b2d-1f1b97464fb7","Type":"ContainerStarted","Data":"797c26501d1606f60aadfbf8731a6ae5cd2ea9c510ce093aacb031d6b095f92a"} Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.666347 4943 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-api-0" event={"ID":"22dac217-35ea-4303-8f79-785445fbb0cd","Type":"ContainerStarted","Data":"0aed50fab0a5cd68095745c6e4662cd55debf1c477a06ceefcb7a16c156423c6"} Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.666378 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"22dac217-35ea-4303-8f79-785445fbb0cd","Type":"ContainerStarted","Data":"6b7c2f0460223d05d2f606d871db157b9a9ff2795666b0fefb95d5a3774f8f0f"} Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.666406 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"22dac217-35ea-4303-8f79-785445fbb0cd","Type":"ContainerStarted","Data":"fca8c7754fe0fbb89066e63c3d5c1074a8916bd28309ada1efe738d1c83a33b8"} Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.702556 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.702537745 podStartE2EDuration="2.702537745s" podCreationTimestamp="2025-11-29 07:14:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:18.695166393 +0000 UTC m=+2433.625255156" watchObservedRunningTime="2025-11-29 07:14:18.702537745 +0000 UTC m=+2433.632626498" Nov 29 07:14:18 crc kubenswrapper[4943]: I1129 07:14:18.717845 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.717826002 podStartE2EDuration="2.717826002s" podCreationTimestamp="2025-11-29 07:14:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:18.71284038 +0000 UTC m=+2433.642929133" watchObservedRunningTime="2025-11-29 07:14:18.717826002 +0000 UTC m=+2433.647914755" Nov 29 07:14:19 crc kubenswrapper[4943]: I1129 07:14:19.675767 4943 generic.go:334] "Generic (PLEG): container finished" podID="b3fda5f5-7547-4aec-b6ce-55b5b7434e86" containerID="cd47a9ba2912637f0e86177785d721d55cb1fe91a746f124c3b58cb6eab8b3bc" exitCode=0 Nov 29 07:14:19 crc kubenswrapper[4943]: I1129 07:14:19.675832 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" event={"ID":"b3fda5f5-7547-4aec-b6ce-55b5b7434e86","Type":"ContainerDied","Data":"cd47a9ba2912637f0e86177785d721d55cb1fe91a746f124c3b58cb6eab8b3bc"} Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.050742 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.143369 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dg579\" (UniqueName: \"kubernetes.io/projected/e88bdece-8370-4e3e-9127-a932d4452f5b-kube-api-access-dg579\") pod \"e88bdece-8370-4e3e-9127-a932d4452f5b\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.148717 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e88bdece-8370-4e3e-9127-a932d4452f5b-kube-api-access-dg579" (OuterVolumeSpecName: "kube-api-access-dg579") pod "e88bdece-8370-4e3e-9127-a932d4452f5b" (UID: "e88bdece-8370-4e3e-9127-a932d4452f5b"). InnerVolumeSpecName "kube-api-access-dg579". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.245099 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-config-data\") pod \"e88bdece-8370-4e3e-9127-a932d4452f5b\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.245179 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-scripts\") pod \"e88bdece-8370-4e3e-9127-a932d4452f5b\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.245241 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-combined-ca-bundle\") pod \"e88bdece-8370-4e3e-9127-a932d4452f5b\" (UID: \"e88bdece-8370-4e3e-9127-a932d4452f5b\") " Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.245833 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dg579\" (UniqueName: \"kubernetes.io/projected/e88bdece-8370-4e3e-9127-a932d4452f5b-kube-api-access-dg579\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.250332 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-scripts" (OuterVolumeSpecName: "scripts") pod "e88bdece-8370-4e3e-9127-a932d4452f5b" (UID: "e88bdece-8370-4e3e-9127-a932d4452f5b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.271429 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e88bdece-8370-4e3e-9127-a932d4452f5b" (UID: "e88bdece-8370-4e3e-9127-a932d4452f5b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.275518 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-config-data" (OuterVolumeSpecName: "config-data") pod "e88bdece-8370-4e3e-9127-a932d4452f5b" (UID: "e88bdece-8370-4e3e-9127-a932d4452f5b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.347543 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.347602 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.347616 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88bdece-8370-4e3e-9127-a932d4452f5b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.689884 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-sf2xk" Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.689879 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-sf2xk" event={"ID":"e88bdece-8370-4e3e-9127-a932d4452f5b","Type":"ContainerDied","Data":"2c16c8461825c8c8776ccddd3eeb85569eb822bc0bec7b0b1fd549668b1f65c9"} Nov 29 07:14:20 crc kubenswrapper[4943]: I1129 07:14:20.689930 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c16c8461825c8c8776ccddd3eeb85569eb822bc0bec7b0b1fd549668b1f65c9" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.146118 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.170208 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-scripts\") pod \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.170675 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-combined-ca-bundle\") pod \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.170742 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6ntv\" (UniqueName: \"kubernetes.io/projected/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-kube-api-access-n6ntv\") pod \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.170881 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-config-data\") pod \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\" (UID: \"b3fda5f5-7547-4aec-b6ce-55b5b7434e86\") " Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.175671 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-scripts" (OuterVolumeSpecName: "scripts") pod "b3fda5f5-7547-4aec-b6ce-55b5b7434e86" (UID: "b3fda5f5-7547-4aec-b6ce-55b5b7434e86"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.190936 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-kube-api-access-n6ntv" (OuterVolumeSpecName: "kube-api-access-n6ntv") pod "b3fda5f5-7547-4aec-b6ce-55b5b7434e86" (UID: "b3fda5f5-7547-4aec-b6ce-55b5b7434e86"). InnerVolumeSpecName "kube-api-access-n6ntv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.204481 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-config-data" (OuterVolumeSpecName: "config-data") pod "b3fda5f5-7547-4aec-b6ce-55b5b7434e86" (UID: "b3fda5f5-7547-4aec-b6ce-55b5b7434e86"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.207406 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3fda5f5-7547-4aec-b6ce-55b5b7434e86" (UID: "b3fda5f5-7547-4aec-b6ce-55b5b7434e86"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.272944 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.272999 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.273013 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6ntv\" (UniqueName: \"kubernetes.io/projected/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-kube-api-access-n6ntv\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.273024 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3fda5f5-7547-4aec-b6ce-55b5b7434e86-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.699591 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" event={"ID":"b3fda5f5-7547-4aec-b6ce-55b5b7434e86","Type":"ContainerDied","Data":"544361c08710eb274354dcf2e73cbea9cba6fd7c82fc155a9c68562a648c621c"} Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.700415 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="544361c08710eb274354dcf2e73cbea9cba6fd7c82fc155a9c68562a648c621c" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.699659 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4qtlw" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.781597 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 29 07:14:21 crc kubenswrapper[4943]: E1129 07:14:21.781992 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e88bdece-8370-4e3e-9127-a932d4452f5b" containerName="nova-manage" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.782013 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e88bdece-8370-4e3e-9127-a932d4452f5b" containerName="nova-manage" Nov 29 07:14:21 crc kubenswrapper[4943]: E1129 07:14:21.782031 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3fda5f5-7547-4aec-b6ce-55b5b7434e86" containerName="nova-cell1-conductor-db-sync" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.782040 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3fda5f5-7547-4aec-b6ce-55b5b7434e86" containerName="nova-cell1-conductor-db-sync" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.782236 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="e88bdece-8370-4e3e-9127-a932d4452f5b" containerName="nova-manage" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.782261 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3fda5f5-7547-4aec-b6ce-55b5b7434e86" containerName="nova-cell1-conductor-db-sync" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.782893 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.784974 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.810504 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.882051 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.882154 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g46pc\" (UniqueName: \"kubernetes.io/projected/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-kube-api-access-g46pc\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.882222 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.983682 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g46pc\" (UniqueName: \"kubernetes.io/projected/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-kube-api-access-g46pc\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " 
pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.984146 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.984326 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.989284 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:21 crc kubenswrapper[4943]: I1129 07:14:21.989445 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:22 crc kubenswrapper[4943]: I1129 07:14:22.005455 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g46pc\" (UniqueName: \"kubernetes.io/projected/25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73-kube-api-access-g46pc\") pod \"nova-cell1-conductor-0\" (UID: \"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73\") " pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:22 crc kubenswrapper[4943]: I1129 07:14:22.051209 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 29 07:14:22 crc kubenswrapper[4943]: I1129 07:14:22.051268 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 29 07:14:22 crc kubenswrapper[4943]: I1129 07:14:22.107629 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:22 crc kubenswrapper[4943]: I1129 07:14:22.570619 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 29 07:14:22 crc kubenswrapper[4943]: I1129 07:14:22.713084 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73","Type":"ContainerStarted","Data":"8bd216186f13ecf38c379fba90533a7f20333d173c3694b5daad5d67255dffb3"} Nov 29 07:14:23 crc kubenswrapper[4943]: I1129 07:14:23.723060 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73","Type":"ContainerStarted","Data":"ea521fc3068f0b458cf5a6931ce2d26c14a61a5bfd3d39f0489a091b4922df9f"} Nov 29 07:14:23 crc kubenswrapper[4943]: I1129 07:14:23.723737 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 29 07:14:23 crc kubenswrapper[4943]: I1129 07:14:23.752413 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.75239336 podStartE2EDuration="2.75239336s" podCreationTimestamp="2025-11-29 07:14:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:23.737028911 +0000 UTC m=+2438.667117664" watchObservedRunningTime="2025-11-29 07:14:23.75239336 +0000 UTC m=+2438.682482113" Nov 29 07:14:24 crc kubenswrapper[4943]: I1129 07:14:24.988162 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.482613 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.483143 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" containerName="nova-api-log" containerID="cri-o://6b7c2f0460223d05d2f606d871db157b9a9ff2795666b0fefb95d5a3774f8f0f" gracePeriod=30 Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.483277 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" containerName="nova-api-api" containerID="cri-o://0aed50fab0a5cd68095745c6e4662cd55debf1c477a06ceefcb7a16c156423c6" gracePeriod=30 Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.506500 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.506827 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerName="nova-metadata-log" containerID="cri-o://1fad52e7f2875946b4e39377d80beb50276c171b7ea4acf167152c4be41a02e4" gracePeriod=30 Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.506892 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerName="nova-metadata-metadata" containerID="cri-o://4877e4985b0e11a7f3957ee40d34e3adcd8ba6ee8f6f765d7d0b4d0e85144085" gracePeriod=30 Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.716027 4943 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.775556 4943 generic.go:334] "Generic (PLEG): container finished" podID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerID="4877e4985b0e11a7f3957ee40d34e3adcd8ba6ee8f6f765d7d0b4d0e85144085" exitCode=0 Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.775608 4943 generic.go:334] "Generic (PLEG): container finished" podID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerID="1fad52e7f2875946b4e39377d80beb50276c171b7ea4acf167152c4be41a02e4" exitCode=143 Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.775655 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a54099e5-ebae-440e-8b2d-1f1b97464fb7","Type":"ContainerDied","Data":"4877e4985b0e11a7f3957ee40d34e3adcd8ba6ee8f6f765d7d0b4d0e85144085"} Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.775685 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a54099e5-ebae-440e-8b2d-1f1b97464fb7","Type":"ContainerDied","Data":"1fad52e7f2875946b4e39377d80beb50276c171b7ea4acf167152c4be41a02e4"} Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.777899 4943 generic.go:334] "Generic (PLEG): container finished" podID="22dac217-35ea-4303-8f79-785445fbb0cd" containerID="0aed50fab0a5cd68095745c6e4662cd55debf1c477a06ceefcb7a16c156423c6" exitCode=0 Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.777925 4943 generic.go:334] "Generic (PLEG): container finished" podID="22dac217-35ea-4303-8f79-785445fbb0cd" containerID="6b7c2f0460223d05d2f606d871db157b9a9ff2795666b0fefb95d5a3774f8f0f" exitCode=143 Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.777943 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"22dac217-35ea-4303-8f79-785445fbb0cd","Type":"ContainerDied","Data":"0aed50fab0a5cd68095745c6e4662cd55debf1c477a06ceefcb7a16c156423c6"} Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.777962 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"22dac217-35ea-4303-8f79-785445fbb0cd","Type":"ContainerDied","Data":"6b7c2f0460223d05d2f606d871db157b9a9ff2795666b0fefb95d5a3774f8f0f"} Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.783765 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9s2lw" Nov 29 07:14:25 crc kubenswrapper[4943]: I1129 07:14:25.961471 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9s2lw"] Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.153477 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.165180 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264395 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22dac217-35ea-4303-8f79-785445fbb0cd-logs\") pod \"22dac217-35ea-4303-8f79-785445fbb0cd\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264459 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-config-data\") pod \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264513 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-combined-ca-bundle\") pod \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264597 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a54099e5-ebae-440e-8b2d-1f1b97464fb7-logs\") pod \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264632 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-nova-metadata-tls-certs\") pod \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264709 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhwm9\" (UniqueName: \"kubernetes.io/projected/a54099e5-ebae-440e-8b2d-1f1b97464fb7-kube-api-access-mhwm9\") pod \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\" (UID: \"a54099e5-ebae-440e-8b2d-1f1b97464fb7\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264746 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-combined-ca-bundle\") pod \"22dac217-35ea-4303-8f79-785445fbb0cd\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264775 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcpdx\" (UniqueName: \"kubernetes.io/projected/22dac217-35ea-4303-8f79-785445fbb0cd-kube-api-access-tcpdx\") pod \"22dac217-35ea-4303-8f79-785445fbb0cd\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.264809 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-config-data\") pod \"22dac217-35ea-4303-8f79-785445fbb0cd\" (UID: \"22dac217-35ea-4303-8f79-785445fbb0cd\") " Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.265797 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a54099e5-ebae-440e-8b2d-1f1b97464fb7-logs" (OuterVolumeSpecName: "logs") pod "a54099e5-ebae-440e-8b2d-1f1b97464fb7" (UID: 
"a54099e5-ebae-440e-8b2d-1f1b97464fb7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.266087 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22dac217-35ea-4303-8f79-785445fbb0cd-logs" (OuterVolumeSpecName: "logs") pod "22dac217-35ea-4303-8f79-785445fbb0cd" (UID: "22dac217-35ea-4303-8f79-785445fbb0cd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.274039 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a54099e5-ebae-440e-8b2d-1f1b97464fb7-kube-api-access-mhwm9" (OuterVolumeSpecName: "kube-api-access-mhwm9") pod "a54099e5-ebae-440e-8b2d-1f1b97464fb7" (UID: "a54099e5-ebae-440e-8b2d-1f1b97464fb7"). InnerVolumeSpecName "kube-api-access-mhwm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.273971 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22dac217-35ea-4303-8f79-785445fbb0cd-kube-api-access-tcpdx" (OuterVolumeSpecName: "kube-api-access-tcpdx") pod "22dac217-35ea-4303-8f79-785445fbb0cd" (UID: "22dac217-35ea-4303-8f79-785445fbb0cd"). InnerVolumeSpecName "kube-api-access-tcpdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.298112 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-config-data" (OuterVolumeSpecName: "config-data") pod "22dac217-35ea-4303-8f79-785445fbb0cd" (UID: "22dac217-35ea-4303-8f79-785445fbb0cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.300912 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "22dac217-35ea-4303-8f79-785445fbb0cd" (UID: "22dac217-35ea-4303-8f79-785445fbb0cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.301401 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a54099e5-ebae-440e-8b2d-1f1b97464fb7" (UID: "a54099e5-ebae-440e-8b2d-1f1b97464fb7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.302652 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-config-data" (OuterVolumeSpecName: "config-data") pod "a54099e5-ebae-440e-8b2d-1f1b97464fb7" (UID: "a54099e5-ebae-440e-8b2d-1f1b97464fb7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.327849 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:14:26 crc kubenswrapper[4943]: E1129 07:14:26.328192 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.335029 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "a54099e5-ebae-440e-8b2d-1f1b97464fb7" (UID: "a54099e5-ebae-440e-8b2d-1f1b97464fb7"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367471 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhwm9\" (UniqueName: \"kubernetes.io/projected/a54099e5-ebae-440e-8b2d-1f1b97464fb7-kube-api-access-mhwm9\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367509 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367518 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcpdx\" (UniqueName: \"kubernetes.io/projected/22dac217-35ea-4303-8f79-785445fbb0cd-kube-api-access-tcpdx\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367528 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22dac217-35ea-4303-8f79-785445fbb0cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367538 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22dac217-35ea-4303-8f79-785445fbb0cd-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367546 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367557 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367584 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a54099e5-ebae-440e-8b2d-1f1b97464fb7-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.367599 4943 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a54099e5-ebae-440e-8b2d-1f1b97464fb7-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.787803 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.787811 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a54099e5-ebae-440e-8b2d-1f1b97464fb7","Type":"ContainerDied","Data":"797c26501d1606f60aadfbf8731a6ae5cd2ea9c510ce093aacb031d6b095f92a"} Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.788420 4943 scope.go:117] "RemoveContainer" containerID="4877e4985b0e11a7f3957ee40d34e3adcd8ba6ee8f6f765d7d0b4d0e85144085" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.793251 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"22dac217-35ea-4303-8f79-785445fbb0cd","Type":"ContainerDied","Data":"fca8c7754fe0fbb89066e63c3d5c1074a8916bd28309ada1efe738d1c83a33b8"} Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.793388 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9s2lw" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="registry-server" containerID="cri-o://281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8" gracePeriod=2 Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.793425 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.821742 4943 scope.go:117] "RemoveContainer" containerID="1fad52e7f2875946b4e39377d80beb50276c171b7ea4acf167152c4be41a02e4" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.829289 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.844855 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.849955 4943 scope.go:117] "RemoveContainer" containerID="0aed50fab0a5cd68095745c6e4662cd55debf1c477a06ceefcb7a16c156423c6" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.858728 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.870046 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.879257 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:26 crc kubenswrapper[4943]: E1129 07:14:26.879715 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" containerName="nova-api-log" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.879733 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" containerName="nova-api-log" Nov 29 07:14:26 crc kubenswrapper[4943]: E1129 07:14:26.879747 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerName="nova-metadata-metadata" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.879754 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerName="nova-metadata-metadata" Nov 29 07:14:26 crc kubenswrapper[4943]: 
E1129 07:14:26.879768 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" containerName="nova-api-api" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.879774 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" containerName="nova-api-api" Nov 29 07:14:26 crc kubenswrapper[4943]: E1129 07:14:26.879783 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerName="nova-metadata-log" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.879790 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerName="nova-metadata-log" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.879975 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerName="nova-metadata-log" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.879990 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" containerName="nova-api-log" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.880017 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" containerName="nova-metadata-metadata" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.880027 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" containerName="nova-api-api" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.880934 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.883066 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.883256 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.890043 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.893887 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.895554 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.902023 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.906696 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.970853 4943 scope.go:117] "RemoveContainer" containerID="6b7c2f0460223d05d2f606d871db157b9a9ff2795666b0fefb95d5a3774f8f0f" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.980033 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgbz5\" (UniqueName: \"kubernetes.io/projected/d302191c-c05e-41a3-b6f0-e8e1e8358a94-kube-api-access-vgbz5\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.980292 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.980397 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d302191c-c05e-41a3-b6f0-e8e1e8358a94-logs\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.980812 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-config-data\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0" Nov 29 07:14:26 crc kubenswrapper[4943]: I1129 07:14:26.980963 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0" Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082644 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgbz5\" (UniqueName: \"kubernetes.io/projected/d302191c-c05e-41a3-b6f0-e8e1e8358a94-kube-api-access-vgbz5\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0" Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082708 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0" Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082731 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d302191c-c05e-41a3-b6f0-e8e1e8358a94-logs\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0" 
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082783 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082799 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a96f0d9-9a50-4657-9564-fefc1d97f758-logs\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082832 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lm2lj\" (UniqueName: \"kubernetes.io/projected/0a96f0d9-9a50-4657-9564-fefc1d97f758-kube-api-access-lm2lj\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082895 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-config-data\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082911 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.082927 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-config-data\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.085135 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d302191c-c05e-41a3-b6f0-e8e1e8358a94-logs\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.091058 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.091126 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.091237 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-config-data\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.103042 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgbz5\" (UniqueName: \"kubernetes.io/projected/d302191c-c05e-41a3-b6f0-e8e1e8358a94-kube-api-access-vgbz5\") pod \"nova-metadata-0\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " pod="openstack/nova-metadata-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.133552 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.184463 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-config-data\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.184587 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.184608 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a96f0d9-9a50-4657-9564-fefc1d97f758-logs\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.184638 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lm2lj\" (UniqueName: \"kubernetes.io/projected/0a96f0d9-9a50-4657-9564-fefc1d97f758-kube-api-access-lm2lj\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.186080 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a96f0d9-9a50-4657-9564-fefc1d97f758-logs\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.189253 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.189407 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-config-data\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.202415 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lm2lj\" (UniqueName: \"kubernetes.io/projected/0a96f0d9-9a50-4657-9564-fefc1d97f758-kube-api-access-lm2lj\") pod \"nova-api-0\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.222505 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9s2lw"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.271120 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.290707 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.299399 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7d6765c5fb-nmqsz"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.356403 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22dac217-35ea-4303-8f79-785445fbb0cd" path="/var/lib/kubelet/pods/22dac217-35ea-4303-8f79-785445fbb0cd/volumes"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.357259 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a54099e5-ebae-440e-8b2d-1f1b97464fb7" path="/var/lib/kubelet/pods/a54099e5-ebae-440e-8b2d-1f1b97464fb7/volumes"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.392102 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-utilities\") pod \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") "
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.392171 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tx5m4\" (UniqueName: \"kubernetes.io/projected/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-kube-api-access-tx5m4\") pod \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") "
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.392217 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-catalog-content\") pod \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\" (UID: \"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0\") "
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.393268 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-utilities" (OuterVolumeSpecName: "utilities") pod "07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" (UID: "07b7f666-65f6-4e54-bf3d-acebc8e1c8f0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.396234 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-kube-api-access-tx5m4" (OuterVolumeSpecName: "kube-api-access-tx5m4") pod "07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" (UID: "07b7f666-65f6-4e54-bf3d-acebc8e1c8f0"). InnerVolumeSpecName "kube-api-access-tx5m4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.471851 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" (UID: "07b7f666-65f6-4e54-bf3d-acebc8e1c8f0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.495660 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.495685 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tx5m4\" (UniqueName: \"kubernetes.io/projected/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-kube-api-access-tx5m4\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.495698 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.776463 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.786708 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.813810 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a96f0d9-9a50-4657-9564-fefc1d97f758","Type":"ContainerStarted","Data":"959485fce1468942eb5b762e0adcd9075dfeb81b7b6ca327238efa32346715a8"}
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.818856 4943 generic.go:334] "Generic (PLEG): container finished" podID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerID="281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8" exitCode=0
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.819076 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9s2lw" event={"ID":"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0","Type":"ContainerDied","Data":"281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8"}
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.819169 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9s2lw" event={"ID":"07b7f666-65f6-4e54-bf3d-acebc8e1c8f0","Type":"ContainerDied","Data":"8811b0207564e01e605a46ce15a5b527769849216decf3b747167cee5cf19043"}
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.819291 4943 scope.go:117] "RemoveContainer" containerID="281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.819490 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9s2lw"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.887119 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9s2lw"]
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.890161 4943 scope.go:117] "RemoveContainer" containerID="5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.894274 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9s2lw"]
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.965857 4943 scope.go:117] "RemoveContainer" containerID="565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.988535 4943 scope.go:117] "RemoveContainer" containerID="281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8"
Nov 29 07:14:27 crc kubenswrapper[4943]: E1129 07:14:27.989441 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8\": container with ID starting with 281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8 not found: ID does not exist" containerID="281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.989558 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8"} err="failed to get container status \"281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8\": rpc error: code = NotFound desc = could not find container \"281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8\": container with ID starting with 281f47f0e0a65461e33302b8b88f2e83a87ea2ffc163c022f759096be3e5cfc8 not found: ID does not exist"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.989675 4943 scope.go:117] "RemoveContainer" containerID="5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846"
Nov 29 07:14:27 crc kubenswrapper[4943]: E1129 07:14:27.989980 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846\": container with ID starting with 5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846 not found: ID does not exist" containerID="5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.990055 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846"} err="failed to get container status \"5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846\": rpc error: code = NotFound desc = could not find container \"5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846\": container with ID starting with 5e007c60b77a8f8166d7d4d97a6ad0193e6971a22b91ba86af64212216d96846 not found: ID does not exist"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.990120 4943 scope.go:117] "RemoveContainer" containerID="565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d"
Nov 29 07:14:27 crc kubenswrapper[4943]: E1129 07:14:27.990359 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d\": container with ID starting with 565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d not found: ID does not exist" containerID="565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d"
Nov 29 07:14:27 crc kubenswrapper[4943]: I1129 07:14:27.990537 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d"} err="failed to get container status \"565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d\": rpc error: code = NotFound desc = could not find container \"565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d\": container with ID starting with 565269b7c708aef18315241fa28a1dbcb244f6d63e9f4f8854555383942da42d not found: ID does not exist"
Nov 29 07:14:28 crc kubenswrapper[4943]: I1129 07:14:28.829173 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d302191c-c05e-41a3-b6f0-e8e1e8358a94","Type":"ContainerStarted","Data":"0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9"}
Nov 29 07:14:28 crc kubenswrapper[4943]: I1129 07:14:28.829245 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d302191c-c05e-41a3-b6f0-e8e1e8358a94","Type":"ContainerStarted","Data":"48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f"}
Nov 29 07:14:28 crc kubenswrapper[4943]: I1129 07:14:28.829260 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d302191c-c05e-41a3-b6f0-e8e1e8358a94","Type":"ContainerStarted","Data":"50450c4afdd72dbdf8aaeab27c162cde21cf02d4feee44cef16413b22129f001"}
Nov 29 07:14:28 crc kubenswrapper[4943]: I1129 07:14:28.832094 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a96f0d9-9a50-4657-9564-fefc1d97f758","Type":"ContainerStarted","Data":"4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434"}
Nov 29 07:14:28 crc kubenswrapper[4943]: I1129 07:14:28.832129 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a96f0d9-9a50-4657-9564-fefc1d97f758","Type":"ContainerStarted","Data":"f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad"}
Nov 29 07:14:28 crc kubenswrapper[4943]: I1129 07:14:28.857893 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.857873696 podStartE2EDuration="2.857873696s" podCreationTimestamp="2025-11-29 07:14:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:28.848843693 +0000 UTC m=+2443.778932446" watchObservedRunningTime="2025-11-29 07:14:28.857873696 +0000 UTC m=+2443.787962449"
Nov 29 07:14:28 crc kubenswrapper[4943]: I1129 07:14:28.872105 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.8720857669999997 podStartE2EDuration="2.872085767s" podCreationTimestamp="2025-11-29 07:14:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:28.870771545 +0000 UTC m=+2443.800860328" watchObservedRunningTime="2025-11-29 07:14:28.872085767 +0000 UTC m=+2443.802174520"
Nov 29 07:14:29 crc kubenswrapper[4943]: I1129 07:14:29.337929 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" path="/var/lib/kubelet/pods/07b7f666-65f6-4e54-bf3d-acebc8e1c8f0/volumes"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.178592 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-64478cdc57-fhzpm"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.248244 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7d6765c5fb-nmqsz"]
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.248621 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7d6765c5fb-nmqsz" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerName="neutron-httpd" containerID="cri-o://3cb8bc656cc4f81b6a6eda2bcdc07ecb2d6450c4fa4b59efec3c5615836bb535" gracePeriod=30
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.248801 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7d6765c5fb-nmqsz" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerName="neutron-api" containerID="cri-o://37a6b2f8e99fb71f260e794a4a34944b4b9b615e2fa46270abe2a04a46405a3e" gracePeriod=30
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.381698 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dg84m"]
Nov 29 07:14:30 crc kubenswrapper[4943]: E1129 07:14:30.382148 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="extract-content"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.382163 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="extract-content"
Nov 29 07:14:30 crc kubenswrapper[4943]: E1129 07:14:30.382179 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="extract-utilities"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.382185 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="extract-utilities"
Nov 29 07:14:30 crc kubenswrapper[4943]: E1129 07:14:30.382207 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="registry-server"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.382214 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="registry-server"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.382390 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="07b7f666-65f6-4e54-bf3d-acebc8e1c8f0" containerName="registry-server"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.383669 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.393822 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dg84m"]
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.550914 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7mvl\" (UniqueName: \"kubernetes.io/projected/496b155b-0379-41ec-8dba-600b70e4b6b1-kube-api-access-z7mvl\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.550959 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-utilities\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.551019 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-catalog-content\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.652817 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7mvl\" (UniqueName: \"kubernetes.io/projected/496b155b-0379-41ec-8dba-600b70e4b6b1-kube-api-access-z7mvl\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.652880 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-utilities\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.652944 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-catalog-content\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.653423 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-catalog-content\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.653961 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-utilities\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.686535 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7mvl\" (UniqueName: \"kubernetes.io/projected/496b155b-0379-41ec-8dba-600b70e4b6b1-kube-api-access-z7mvl\") pod \"redhat-marketplace-dg84m\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") " pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.730489 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.870354 4943 generic.go:334] "Generic (PLEG): container finished" podID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerID="3cb8bc656cc4f81b6a6eda2bcdc07ecb2d6450c4fa4b59efec3c5615836bb535" exitCode=0
Nov 29 07:14:30 crc kubenswrapper[4943]: I1129 07:14:30.870745 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d6765c5fb-nmqsz" event={"ID":"922c84d9-bb4a-4834-a07e-011c2d9cec4d","Type":"ContainerDied","Data":"3cb8bc656cc4f81b6a6eda2bcdc07ecb2d6450c4fa4b59efec3c5615836bb535"}
Nov 29 07:14:31 crc kubenswrapper[4943]: I1129 07:14:31.209890 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dg84m"]
Nov 29 07:14:31 crc kubenswrapper[4943]: W1129 07:14:31.218112 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod496b155b_0379_41ec_8dba_600b70e4b6b1.slice/crio-84ab84432b18735830b3dfc41a053f10b97b5829963f6820f65ba4faadc73afd WatchSource:0}: Error finding container 84ab84432b18735830b3dfc41a053f10b97b5829963f6820f65ba4faadc73afd: Status 404 returned error can't find the container with id 84ab84432b18735830b3dfc41a053f10b97b5829963f6820f65ba4faadc73afd
Nov 29 07:14:31 crc kubenswrapper[4943]: I1129 07:14:31.882217 4943 generic.go:334] "Generic (PLEG): container finished" podID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerID="9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896" exitCode=0
Nov 29 07:14:31 crc kubenswrapper[4943]: I1129 07:14:31.882350 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dg84m" event={"ID":"496b155b-0379-41ec-8dba-600b70e4b6b1","Type":"ContainerDied","Data":"9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896"}
Nov 29 07:14:31 crc kubenswrapper[4943]: I1129 07:14:31.882793 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dg84m" event={"ID":"496b155b-0379-41ec-8dba-600b70e4b6b1","Type":"ContainerStarted","Data":"84ab84432b18735830b3dfc41a053f10b97b5829963f6820f65ba4faadc73afd"}
Nov 29 07:14:32 crc kubenswrapper[4943]: I1129 07:14:32.275763 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 29 07:14:32 crc kubenswrapper[4943]: I1129 07:14:32.275900 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 29 07:14:33 crc kubenswrapper[4943]: I1129 07:14:33.910937 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dg84m" event={"ID":"496b155b-0379-41ec-8dba-600b70e4b6b1","Type":"ContainerStarted","Data":"5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c"}
Nov 29 07:14:34 crc kubenswrapper[4943]: I1129 07:14:34.516958 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 29 07:14:34 crc kubenswrapper[4943]: I1129 07:14:34.933030 4943 generic.go:334] "Generic (PLEG): container finished" podID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerID="37a6b2f8e99fb71f260e794a4a34944b4b9b615e2fa46270abe2a04a46405a3e" exitCode=0
Nov 29 07:14:34 crc kubenswrapper[4943]: I1129 07:14:34.933072 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d6765c5fb-nmqsz" event={"ID":"922c84d9-bb4a-4834-a07e-011c2d9cec4d","Type":"ContainerDied","Data":"37a6b2f8e99fb71f260e794a4a34944b4b9b615e2fa46270abe2a04a46405a3e"}
Nov 29 07:14:34 crc kubenswrapper[4943]: I1129 07:14:34.938629 4943 generic.go:334] "Generic (PLEG): container finished" podID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerID="5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c" exitCode=0
Nov 29 07:14:34 crc kubenswrapper[4943]: I1129 07:14:34.938671 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dg84m" event={"ID":"496b155b-0379-41ec-8dba-600b70e4b6b1","Type":"ContainerDied","Data":"5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c"}
Nov 29 07:14:34 crc kubenswrapper[4943]: I1129 07:14:34.938697 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dg84m" event={"ID":"496b155b-0379-41ec-8dba-600b70e4b6b1","Type":"ContainerStarted","Data":"504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a"}
Nov 29 07:14:34 crc kubenswrapper[4943]: I1129 07:14:34.969500 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dg84m" podStartSLOduration=2.412525442 podStartE2EDuration="4.969476724s" podCreationTimestamp="2025-11-29 07:14:30 +0000 UTC" firstStartedPulling="2025-11-29 07:14:31.884736091 +0000 UTC m=+2446.814824834" lastFinishedPulling="2025-11-29 07:14:34.441687363 +0000 UTC m=+2449.371776116" observedRunningTime="2025-11-29 07:14:34.95917 +0000 UTC m=+2449.889258763" watchObservedRunningTime="2025-11-29 07:14:34.969476724 +0000 UTC m=+2449.899565477"
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.172676 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7d6765c5fb-nmqsz"
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.280447 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-httpd-config\") pod \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") "
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.280612 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-ovndb-tls-certs\") pod \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") "
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.280646 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klb2m\" (UniqueName: \"kubernetes.io/projected/922c84d9-bb4a-4834-a07e-011c2d9cec4d-kube-api-access-klb2m\") pod \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") "
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.280684 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-config\") pod \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") "
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.280730 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-combined-ca-bundle\") pod \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\" (UID: \"922c84d9-bb4a-4834-a07e-011c2d9cec4d\") "
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.288154 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "922c84d9-bb4a-4834-a07e-011c2d9cec4d" (UID: "922c84d9-bb4a-4834-a07e-011c2d9cec4d"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.288760 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/922c84d9-bb4a-4834-a07e-011c2d9cec4d-kube-api-access-klb2m" (OuterVolumeSpecName: "kube-api-access-klb2m") pod "922c84d9-bb4a-4834-a07e-011c2d9cec4d" (UID: "922c84d9-bb4a-4834-a07e-011c2d9cec4d"). InnerVolumeSpecName "kube-api-access-klb2m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.346047 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-config" (OuterVolumeSpecName: "config") pod "922c84d9-bb4a-4834-a07e-011c2d9cec4d" (UID: "922c84d9-bb4a-4834-a07e-011c2d9cec4d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.356601 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "922c84d9-bb4a-4834-a07e-011c2d9cec4d" (UID: "922c84d9-bb4a-4834-a07e-011c2d9cec4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.370090 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "922c84d9-bb4a-4834-a07e-011c2d9cec4d" (UID: "922c84d9-bb4a-4834-a07e-011c2d9cec4d"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.382970 4943 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.383004 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klb2m\" (UniqueName: \"kubernetes.io/projected/922c84d9-bb4a-4834-a07e-011c2d9cec4d-kube-api-access-klb2m\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.383016 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-config\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.383026 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.383034 4943 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/922c84d9-bb4a-4834-a07e-011c2d9cec4d-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.953232 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d6765c5fb-nmqsz" event={"ID":"922c84d9-bb4a-4834-a07e-011c2d9cec4d","Type":"ContainerDied","Data":"b8feccb9ea48b2b34fde3ff96a5ee97fee732739981e6fb34b8fb1452116b56e"}
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.953303 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7d6765c5fb-nmqsz"
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.953336 4943 scope.go:117] "RemoveContainer" containerID="3cb8bc656cc4f81b6a6eda2bcdc07ecb2d6450c4fa4b59efec3c5615836bb535"
Nov 29 07:14:35 crc kubenswrapper[4943]: I1129 07:14:35.975156 4943 scope.go:117] "RemoveContainer" containerID="37a6b2f8e99fb71f260e794a4a34944b4b9b615e2fa46270abe2a04a46405a3e"
Nov 29 07:14:36 crc kubenswrapper[4943]: I1129 07:14:36.000623 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7d6765c5fb-nmqsz"]
Nov 29 07:14:36 crc kubenswrapper[4943]: I1129 07:14:36.008254 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7d6765c5fb-nmqsz"]
Nov 29 07:14:37 crc kubenswrapper[4943]: I1129 07:14:37.272239 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 29 07:14:37 crc kubenswrapper[4943]: I1129 07:14:37.272354 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 29 07:14:37 crc kubenswrapper[4943]: I1129 07:14:37.293047 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 29 07:14:37 crc kubenswrapper[4943]: I1129 07:14:37.293122 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 29 07:14:37 crc kubenswrapper[4943]: I1129 07:14:37.355393 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" path="/var/lib/kubelet/pods/922c84d9-bb4a-4834-a07e-011c2d9cec4d/volumes"
Nov 29 07:14:38 crc kubenswrapper[4943]: I1129 07:14:38.284765 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 29 07:14:38 crc kubenswrapper[4943]: I1129 07:14:38.284954 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 29 07:14:38 crc kubenswrapper[4943]: I1129 07:14:38.328608 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"
Nov 29 07:14:38 crc kubenswrapper[4943]: E1129 07:14:38.328805 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:14:38 crc kubenswrapper[4943]: I1129 07:14:38.375004 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 29 07:14:38 crc kubenswrapper[4943]: I1129 07:14:38.375063 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 29 07:14:40 crc kubenswrapper[4943]: I1129 07:14:40.731704 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:40 crc kubenswrapper[4943]: I1129 07:14:40.732724 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:40 crc kubenswrapper[4943]: I1129 07:14:40.780996 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:41 crc kubenswrapper[4943]: I1129 07:14:41.042340 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:41 crc kubenswrapper[4943]: I1129 07:14:41.089788 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dg84m"]
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.068531 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dg84m" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerName="registry-server" containerID="cri-o://504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a" gracePeriod=2
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.568440 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.682222 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-catalog-content\") pod \"496b155b-0379-41ec-8dba-600b70e4b6b1\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") "
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.682372 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-utilities\") pod \"496b155b-0379-41ec-8dba-600b70e4b6b1\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") "
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.682419 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7mvl\" (UniqueName: \"kubernetes.io/projected/496b155b-0379-41ec-8dba-600b70e4b6b1-kube-api-access-z7mvl\") pod \"496b155b-0379-41ec-8dba-600b70e4b6b1\" (UID: \"496b155b-0379-41ec-8dba-600b70e4b6b1\") "
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.683744 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-utilities" (OuterVolumeSpecName: "utilities") pod "496b155b-0379-41ec-8dba-600b70e4b6b1" (UID: "496b155b-0379-41ec-8dba-600b70e4b6b1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.688235 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496b155b-0379-41ec-8dba-600b70e4b6b1-kube-api-access-z7mvl" (OuterVolumeSpecName: "kube-api-access-z7mvl") pod "496b155b-0379-41ec-8dba-600b70e4b6b1" (UID: "496b155b-0379-41ec-8dba-600b70e4b6b1"). InnerVolumeSpecName "kube-api-access-z7mvl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.718029 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "496b155b-0379-41ec-8dba-600b70e4b6b1" (UID: "496b155b-0379-41ec-8dba-600b70e4b6b1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.784438 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.787413 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7mvl\" (UniqueName: \"kubernetes.io/projected/496b155b-0379-41ec-8dba-600b70e4b6b1-kube-api-access-z7mvl\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:43 crc kubenswrapper[4943]: I1129 07:14:43.787439 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/496b155b-0379-41ec-8dba-600b70e4b6b1-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.086965 4943 generic.go:334] "Generic (PLEG): container finished" podID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerID="504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a" exitCode=0
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.087039 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dg84m" event={"ID":"496b155b-0379-41ec-8dba-600b70e4b6b1","Type":"ContainerDied","Data":"504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a"}
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.087083 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dg84m" event={"ID":"496b155b-0379-41ec-8dba-600b70e4b6b1","Type":"ContainerDied","Data":"84ab84432b18735830b3dfc41a053f10b97b5829963f6820f65ba4faadc73afd"}
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.087110 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dg84m"
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.087128 4943 scope.go:117] "RemoveContainer" containerID="504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a"
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.119648 4943 scope.go:117] "RemoveContainer" containerID="5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c"
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.150745 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dg84m"]
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.159344 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dg84m"]
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.163833 4943 scope.go:117] "RemoveContainer" containerID="9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896"
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.201316 4943 scope.go:117] "RemoveContainer" containerID="504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a"
Nov 29 07:14:44 crc kubenswrapper[4943]: E1129 07:14:44.202980 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a\": container with ID starting with 504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a not found: ID does not exist" containerID="504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a"
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.203008 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a"} err="failed to get container status \"504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a\": rpc error: code = NotFound desc = could not find container \"504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a\": container with ID starting with 504d2c0860dbf4aeeb4746e3eebef25786bb1388a4137f9e1f51e64847c2850a not found: ID does not exist"
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.203028 4943 scope.go:117] "RemoveContainer" containerID="5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c"
Nov 29 07:14:44 crc kubenswrapper[4943]: E1129 07:14:44.203347 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c\": container with ID starting with 5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c not found: ID does not exist" containerID="5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c"
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.203427 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c"} err="failed to get container status \"5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c\": rpc error: code = NotFound desc = could not find container \"5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c\": container with ID starting with 5abd85ec8c921b8e41ac158b1d3e0769e90651f8382a94164a07e52c2258763c not found: ID does not exist"
Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.203493 4943 scope.go:117] "RemoveContainer"
containerID="9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896" Nov 29 07:14:44 crc kubenswrapper[4943]: E1129 07:14:44.203829 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896\": container with ID starting with 9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896 not found: ID does not exist" containerID="9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.203851 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896"} err="failed to get container status \"9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896\": rpc error: code = NotFound desc = could not find container \"9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896\": container with ID starting with 9daa5cde058c26840887eec7bcf96bb83a5cfc2767afce1fd91c769aba8e0896 not found: ID does not exist" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.749162 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.756320 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.910018 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-combined-ca-bundle\") pod \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.910125 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-config-data\") pod \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.910349 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-config-data\") pod \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.910524 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vc42\" (UniqueName: \"kubernetes.io/projected/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-kube-api-access-2vc42\") pod \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.910700 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-combined-ca-bundle\") pod \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\" (UID: \"c2ea1d69-a79d-45c0-8c74-c394cedf30ce\") " Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.910775 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwcgd\" (UniqueName: \"kubernetes.io/projected/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-kube-api-access-xwcgd\") pod 
\"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\" (UID: \"2768e1a6-f9ab-4039-b6b9-f3593613e0cd\") " Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.923068 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-kube-api-access-2vc42" (OuterVolumeSpecName: "kube-api-access-2vc42") pod "c2ea1d69-a79d-45c0-8c74-c394cedf30ce" (UID: "c2ea1d69-a79d-45c0-8c74-c394cedf30ce"). InnerVolumeSpecName "kube-api-access-2vc42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.925804 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-kube-api-access-xwcgd" (OuterVolumeSpecName: "kube-api-access-xwcgd") pod "2768e1a6-f9ab-4039-b6b9-f3593613e0cd" (UID: "2768e1a6-f9ab-4039-b6b9-f3593613e0cd"). InnerVolumeSpecName "kube-api-access-xwcgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.956851 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-config-data" (OuterVolumeSpecName: "config-data") pod "2768e1a6-f9ab-4039-b6b9-f3593613e0cd" (UID: "2768e1a6-f9ab-4039-b6b9-f3593613e0cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.958729 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2768e1a6-f9ab-4039-b6b9-f3593613e0cd" (UID: "2768e1a6-f9ab-4039-b6b9-f3593613e0cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.962538 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2ea1d69-a79d-45c0-8c74-c394cedf30ce" (UID: "c2ea1d69-a79d-45c0-8c74-c394cedf30ce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:44 crc kubenswrapper[4943]: I1129 07:14:44.963371 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-config-data" (OuterVolumeSpecName: "config-data") pod "c2ea1d69-a79d-45c0-8c74-c394cedf30ce" (UID: "c2ea1d69-a79d-45c0-8c74-c394cedf30ce"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.013822 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.013898 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vc42\" (UniqueName: \"kubernetes.io/projected/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-kube-api-access-2vc42\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.013918 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ea1d69-a79d-45c0-8c74-c394cedf30ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.013934 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwcgd\" (UniqueName: \"kubernetes.io/projected/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-kube-api-access-xwcgd\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.013948 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.013960 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2768e1a6-f9ab-4039-b6b9-f3593613e0cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.103944 4943 generic.go:334] "Generic (PLEG): container finished" podID="2768e1a6-f9ab-4039-b6b9-f3593613e0cd" containerID="31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9" exitCode=137 Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.104029 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2768e1a6-f9ab-4039-b6b9-f3593613e0cd","Type":"ContainerDied","Data":"31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9"} Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.104066 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2768e1a6-f9ab-4039-b6b9-f3593613e0cd","Type":"ContainerDied","Data":"b68b7fb284305e779433a4017847eed7a3ed651ca2bb63fca47e3f6b4e1d4b73"} Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.104084 4943 scope.go:117] "RemoveContainer" containerID="31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.104087 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.108862 4943 generic.go:334] "Generic (PLEG): container finished" podID="c2ea1d69-a79d-45c0-8c74-c394cedf30ce" containerID="9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2" exitCode=137 Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.108933 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c2ea1d69-a79d-45c0-8c74-c394cedf30ce","Type":"ContainerDied","Data":"9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2"} Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.109026 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c2ea1d69-a79d-45c0-8c74-c394cedf30ce","Type":"ContainerDied","Data":"90cfd339e4c23e711eb24ae1a7ce924f6e333be42c74479706d09d255725cd31"} Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.109065 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.155402 4943 scope.go:117] "RemoveContainer" containerID="31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9" Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.156306 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9\": container with ID starting with 31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9 not found: ID does not exist" containerID="31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.156802 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9"} err="failed to get container status \"31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9\": rpc error: code = NotFound desc = could not find container \"31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9\": container with ID starting with 31cccf9866d979aa768834b04286504af6e87d451e141f641b6f651d7c1ad0a9 not found: ID does not exist" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.157364 4943 scope.go:117] "RemoveContainer" containerID="9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.164887 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.176573 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.186834 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.187337 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerName="neutron-api" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187364 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerName="neutron-api" Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.187396 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" 
containerName="registry-server" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187404 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerName="registry-server" Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.187421 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2768e1a6-f9ab-4039-b6b9-f3593613e0cd" containerName="nova-scheduler-scheduler" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187428 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2768e1a6-f9ab-4039-b6b9-f3593613e0cd" containerName="nova-scheduler-scheduler" Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.187481 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerName="extract-content" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187490 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerName="extract-content" Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.187507 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2ea1d69-a79d-45c0-8c74-c394cedf30ce" containerName="nova-cell1-novncproxy-novncproxy" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187515 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2ea1d69-a79d-45c0-8c74-c394cedf30ce" containerName="nova-cell1-novncproxy-novncproxy" Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.187528 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerName="neutron-httpd" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187536 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerName="neutron-httpd" Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.187554 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerName="extract-utilities" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187581 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerName="extract-utilities" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187813 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2ea1d69-a79d-45c0-8c74-c394cedf30ce" containerName="nova-cell1-novncproxy-novncproxy" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187846 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2768e1a6-f9ab-4039-b6b9-f3593613e0cd" containerName="nova-scheduler-scheduler" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187863 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerName="neutron-httpd" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187878 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" containerName="registry-server" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.187915 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="922c84d9-bb4a-4834-a07e-011c2d9cec4d" containerName="neutron-api" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.188765 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.193830 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.201277 4943 scope.go:117] "RemoveContainer" containerID="9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2" Nov 29 07:14:45 crc kubenswrapper[4943]: E1129 07:14:45.203492 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2\": container with ID starting with 9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2 not found: ID does not exist" containerID="9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.203539 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2"} err="failed to get container status \"9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2\": rpc error: code = NotFound desc = could not find container \"9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2\": container with ID starting with 9760a9fe752569ae933c793880b61d5615de851fc493f7deb3553dcad529a8e2 not found: ID does not exist" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.210287 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.215706 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.223327 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.250154 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.251801 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.255030 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.255170 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.255902 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.264316 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.318844 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.318911 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-config-data\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.319305 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkxjx\" (UniqueName: \"kubernetes.io/projected/018a61fe-e314-4c8f-b99b-df417631b935-kube-api-access-rkxjx\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.339399 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2768e1a6-f9ab-4039-b6b9-f3593613e0cd" path="/var/lib/kubelet/pods/2768e1a6-f9ab-4039-b6b9-f3593613e0cd/volumes" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.340619 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496b155b-0379-41ec-8dba-600b70e4b6b1" path="/var/lib/kubelet/pods/496b155b-0379-41ec-8dba-600b70e4b6b1/volumes" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.342160 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2ea1d69-a79d-45c0-8c74-c394cedf30ce" path="/var/lib/kubelet/pods/c2ea1d69-a79d-45c0-8c74-c394cedf30ce/volumes" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.420879 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.420981 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-config-data\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.421061 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.421091 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pz542\" (UniqueName: \"kubernetes.io/projected/9511ac38-5f33-48e0-a59a-197e253fbc8e-kube-api-access-pz542\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.421336 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.421484 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.421743 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkxjx\" (UniqueName: \"kubernetes.io/projected/018a61fe-e314-4c8f-b99b-df417631b935-kube-api-access-rkxjx\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.421832 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.423091 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.425006 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.443768 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-config-data\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.444264 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkxjx\" (UniqueName: \"kubernetes.io/projected/018a61fe-e314-4c8f-b99b-df417631b935-kube-api-access-rkxjx\") pod \"nova-scheduler-0\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 
07:14:45.506552 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.523976 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.524054 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pz542\" (UniqueName: \"kubernetes.io/projected/9511ac38-5f33-48e0-a59a-197e253fbc8e-kube-api-access-pz542\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.524104 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.524168 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.524256 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.529549 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.529806 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.530012 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.531128 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.538630 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.541086 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.542040 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9511ac38-5f33-48e0-a59a-197e253fbc8e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.569930 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pz542\" (UniqueName: \"kubernetes.io/projected/9511ac38-5f33-48e0-a59a-197e253fbc8e-kube-api-access-pz542\") pod \"nova-cell1-novncproxy-0\" (UID: \"9511ac38-5f33-48e0-a59a-197e253fbc8e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.606277 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.952101 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:45 crc kubenswrapper[4943]: I1129 07:14:45.960869 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:14:45 crc kubenswrapper[4943]: W1129 07:14:45.963872 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod018a61fe_e314_4c8f_b99b_df417631b935.slice/crio-dbdf1f66eddb389590aba09842bce5f3c00942d34535c1b4fcba236f4c685a93 WatchSource:0}: Error finding container dbdf1f66eddb389590aba09842bce5f3c00942d34535c1b4fcba236f4c685a93: Status 404 returned error can't find the container with id dbdf1f66eddb389590aba09842bce5f3c00942d34535c1b4fcba236f4c685a93 Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.121822 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"018a61fe-e314-4c8f-b99b-df417631b935","Type":"ContainerStarted","Data":"dbdf1f66eddb389590aba09842bce5f3c00942d34535c1b4fcba236f4c685a93"} Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.127244 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 07:14:46 crc kubenswrapper[4943]: W1129 07:14:46.127666 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9511ac38_5f33_48e0_a59a_197e253fbc8e.slice/crio-21dbcc40436c020577c68e356fe68abc2dd661686a9e263e9f092141309917be WatchSource:0}: Error finding container 21dbcc40436c020577c68e356fe68abc2dd661686a9e263e9f092141309917be: Status 404 returned error can't find the container with id 21dbcc40436c020577c68e356fe68abc2dd661686a9e263e9f092141309917be Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.128237 4943 generic.go:334] "Generic (PLEG): container finished" podID="73439687-a401-401c-bbbf-48de7fed3a51" containerID="a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c" exitCode=137 Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.128284 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerDied","Data":"a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c"} Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.128318 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73439687-a401-401c-bbbf-48de7fed3a51","Type":"ContainerDied","Data":"58758ddd77cadb7c1da305ab14cfde04789d1bded933ec60c2cb13841f7758ea"} Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.128339 4943 scope.go:117] "RemoveContainer" containerID="a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.128626 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.135919 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-config-data\") pod \"73439687-a401-401c-bbbf-48de7fed3a51\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.136036 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-scripts\") pod \"73439687-a401-401c-bbbf-48de7fed3a51\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.136117 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-combined-ca-bundle\") pod \"73439687-a401-401c-bbbf-48de7fed3a51\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.136162 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wffs2\" (UniqueName: \"kubernetes.io/projected/73439687-a401-401c-bbbf-48de7fed3a51-kube-api-access-wffs2\") pod \"73439687-a401-401c-bbbf-48de7fed3a51\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.136190 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-sg-core-conf-yaml\") pod \"73439687-a401-401c-bbbf-48de7fed3a51\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.136230 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-run-httpd\") pod \"73439687-a401-401c-bbbf-48de7fed3a51\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.136290 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-ceilometer-tls-certs\") pod \"73439687-a401-401c-bbbf-48de7fed3a51\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.136320 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-log-httpd\") pod 
\"73439687-a401-401c-bbbf-48de7fed3a51\" (UID: \"73439687-a401-401c-bbbf-48de7fed3a51\") " Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.137478 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "73439687-a401-401c-bbbf-48de7fed3a51" (UID: "73439687-a401-401c-bbbf-48de7fed3a51"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.137891 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "73439687-a401-401c-bbbf-48de7fed3a51" (UID: "73439687-a401-401c-bbbf-48de7fed3a51"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.143720 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73439687-a401-401c-bbbf-48de7fed3a51-kube-api-access-wffs2" (OuterVolumeSpecName: "kube-api-access-wffs2") pod "73439687-a401-401c-bbbf-48de7fed3a51" (UID: "73439687-a401-401c-bbbf-48de7fed3a51"). InnerVolumeSpecName "kube-api-access-wffs2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.144015 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-scripts" (OuterVolumeSpecName: "scripts") pod "73439687-a401-401c-bbbf-48de7fed3a51" (UID: "73439687-a401-401c-bbbf-48de7fed3a51"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.151118 4943 scope.go:117] "RemoveContainer" containerID="6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.169152 4943 scope.go:117] "RemoveContainer" containerID="f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.174064 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "73439687-a401-401c-bbbf-48de7fed3a51" (UID: "73439687-a401-401c-bbbf-48de7fed3a51"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.191624 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "73439687-a401-401c-bbbf-48de7fed3a51" (UID: "73439687-a401-401c-bbbf-48de7fed3a51"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.218690 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "73439687-a401-401c-bbbf-48de7fed3a51" (UID: "73439687-a401-401c-bbbf-48de7fed3a51"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.228734 4943 scope.go:117] "RemoveContainer" containerID="164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.238097 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.238132 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.238146 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wffs2\" (UniqueName: \"kubernetes.io/projected/73439687-a401-401c-bbbf-48de7fed3a51-kube-api-access-wffs2\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.238159 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.238170 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.238180 4943 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.238191 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73439687-a401-401c-bbbf-48de7fed3a51-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.240684 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-config-data" (OuterVolumeSpecName: "config-data") pod "73439687-a401-401c-bbbf-48de7fed3a51" (UID: "73439687-a401-401c-bbbf-48de7fed3a51"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.340219 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73439687-a401-401c-bbbf-48de7fed3a51-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.363534 4943 scope.go:117] "RemoveContainer" containerID="a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c" Nov 29 07:14:46 crc kubenswrapper[4943]: E1129 07:14:46.365895 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c\": container with ID starting with a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c not found: ID does not exist" containerID="a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.365942 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c"} err="failed to get container status \"a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c\": rpc error: code = NotFound desc = could not find container \"a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c\": container with ID starting with a24d1453e1c228c51178ef55f583f20f8512c1290d298b839b3b6bd189c4fe9c not found: ID does not exist" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.365972 4943 scope.go:117] "RemoveContainer" containerID="6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df" Nov 29 07:14:46 crc kubenswrapper[4943]: E1129 07:14:46.366357 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df\": container with ID starting with 6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df not found: ID does not exist" containerID="6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.366394 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df"} err="failed to get container status \"6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df\": rpc error: code = NotFound desc = could not find container \"6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df\": container with ID starting with 6ab3098eef830ef9348bc953223c1003bfe45210b3e36774e4e97307da5863df not found: ID does not exist" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.366419 4943 scope.go:117] "RemoveContainer" containerID="f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024" Nov 29 07:14:46 crc kubenswrapper[4943]: E1129 07:14:46.367287 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024\": container with ID starting with f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024 not found: ID does not exist" containerID="f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.367329 4943 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024"} err="failed to get container status \"f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024\": rpc error: code = NotFound desc = could not find container \"f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024\": container with ID starting with f9a1b74418328a2b377a43fc202ecfc601eab6d664cb8e297896e911aadfd024 not found: ID does not exist" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.367354 4943 scope.go:117] "RemoveContainer" containerID="164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00" Nov 29 07:14:46 crc kubenswrapper[4943]: E1129 07:14:46.368399 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00\": container with ID starting with 164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00 not found: ID does not exist" containerID="164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.368437 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00"} err="failed to get container status \"164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00\": rpc error: code = NotFound desc = could not find container \"164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00\": container with ID starting with 164330ffa6a72d4e3f8988660bf145849faf932b5581353f6dfdd103e8380c00 not found: ID does not exist" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.474040 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.483682 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.493059 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:46 crc kubenswrapper[4943]: E1129 07:14:46.494276 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="proxy-httpd" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.494295 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="proxy-httpd" Nov 29 07:14:46 crc kubenswrapper[4943]: E1129 07:14:46.494309 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="sg-core" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.495093 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="sg-core" Nov 29 07:14:46 crc kubenswrapper[4943]: E1129 07:14:46.495111 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="ceilometer-notification-agent" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.495117 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="ceilometer-notification-agent" Nov 29 07:14:46 crc kubenswrapper[4943]: E1129 07:14:46.495147 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73439687-a401-401c-bbbf-48de7fed3a51" 
containerName="ceilometer-central-agent" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.495153 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="ceilometer-central-agent" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.508864 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="sg-core" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.508990 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="proxy-httpd" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.509010 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="ceilometer-central-agent" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.509045 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="73439687-a401-401c-bbbf-48de7fed3a51" containerName="ceilometer-notification-agent" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.512677 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.515606 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.520520 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.521991 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.530181 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.543633 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-run-httpd\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.543721 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-scripts\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.543753 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.543780 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-log-httpd\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.543811 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.543846 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-config-data\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.543867 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.543970 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9kd8\" (UniqueName: \"kubernetes.io/projected/a166d56d-c74c-43a6-a1e4-e955f615f6b6-kube-api-access-t9kd8\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645265 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-run-httpd\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645582 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-scripts\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645608 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645628 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-log-httpd\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645652 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645675 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-config-data\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645689 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645731 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-run-httpd\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.645745 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9kd8\" (UniqueName: \"kubernetes.io/projected/a166d56d-c74c-43a6-a1e4-e955f615f6b6-kube-api-access-t9kd8\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.646478 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-log-httpd\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.651183 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.651380 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-config-data\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.651627 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.652327 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-scripts\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.662780 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.663336 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9kd8\" (UniqueName: \"kubernetes.io/projected/a166d56d-c74c-43a6-a1e4-e955f615f6b6-kube-api-access-t9kd8\") pod \"ceilometer-0\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " pod="openstack/ceilometer-0" Nov 29 07:14:46 crc kubenswrapper[4943]: I1129 07:14:46.845234 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.087829 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:47 crc kubenswrapper[4943]: W1129 07:14:47.090389 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda166d56d_c74c_43a6_a1e4_e955f615f6b6.slice/crio-1ddf45628f90f1ca0f4e5d5816944c33ac0b778298168d38a4ba3bac52205cf3 WatchSource:0}: Error finding container 1ddf45628f90f1ca0f4e5d5816944c33ac0b778298168d38a4ba3bac52205cf3: Status 404 returned error can't find the container with id 1ddf45628f90f1ca0f4e5d5816944c33ac0b778298168d38a4ba3bac52205cf3 Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.139229 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"018a61fe-e314-4c8f-b99b-df417631b935","Type":"ContainerStarted","Data":"f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149"} Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.143687 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerStarted","Data":"1ddf45628f90f1ca0f4e5d5816944c33ac0b778298168d38a4ba3bac52205cf3"} Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.145489 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9511ac38-5f33-48e0-a59a-197e253fbc8e","Type":"ContainerStarted","Data":"af15095a62e3a76e8db234efc2ade45c60e9eea27c8ff4d82af521d8e9f449b2"} Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.145521 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9511ac38-5f33-48e0-a59a-197e253fbc8e","Type":"ContainerStarted","Data":"21dbcc40436c020577c68e356fe68abc2dd661686a9e263e9f092141309917be"} Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.192528 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.192504777 podStartE2EDuration="2.192504777s" podCreationTimestamp="2025-11-29 07:14:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:47.164957638 +0000 UTC m=+2462.095046381" watchObservedRunningTime="2025-11-29 07:14:47.192504777 +0000 UTC m=+2462.122593550" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.193446 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.193437981 podStartE2EDuration="2.193437981s" podCreationTimestamp="2025-11-29 07:14:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:47.181688601 +0000 UTC m=+2462.111777374" watchObservedRunningTime="2025-11-29 07:14:47.193437981 +0000 UTC m=+2462.123526744" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.277462 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.284106 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.287392 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/nova-metadata-0" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.296129 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.297384 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.299249 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.303637 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 29 07:14:47 crc kubenswrapper[4943]: I1129 07:14:47.338090 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73439687-a401-401c-bbbf-48de7fed3a51" path="/var/lib/kubelet/pods/73439687-a401-401c-bbbf-48de7fed3a51/volumes" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.154890 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerStarted","Data":"988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb"} Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.155603 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.158823 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.171798 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.362135 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-vzvpz"] Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.365337 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.380573 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-vzvpz"] Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.474262 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-config\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.474314 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-dns-svc\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.474411 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.474491 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s97s5\" (UniqueName: \"kubernetes.io/projected/61e0b52f-bff7-4aaa-a0e3-309165bc0513-kube-api-access-s97s5\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.474526 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.575659 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-config\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.575707 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-dns-svc\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.575779 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.575834 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-s97s5\" (UniqueName: \"kubernetes.io/projected/61e0b52f-bff7-4aaa-a0e3-309165bc0513-kube-api-access-s97s5\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.575856 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.576847 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.576855 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-dns-svc\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.577376 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.577618 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-config\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.601929 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s97s5\" (UniqueName: \"kubernetes.io/projected/61e0b52f-bff7-4aaa-a0e3-309165bc0513-kube-api-access-s97s5\") pod \"dnsmasq-dns-5b856c5697-vzvpz\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") " pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:48 crc kubenswrapper[4943]: I1129 07:14:48.682283 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:49 crc kubenswrapper[4943]: I1129 07:14:49.127672 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-vzvpz"] Nov 29 07:14:49 crc kubenswrapper[4943]: I1129 07:14:49.168362 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" event={"ID":"61e0b52f-bff7-4aaa-a0e3-309165bc0513","Type":"ContainerStarted","Data":"ac974df5f3425681d436ad56e13e97c1f20e70c884196fc231a50b9d3ee443ba"} Nov 29 07:14:49 crc kubenswrapper[4943]: I1129 07:14:49.172262 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerStarted","Data":"21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c"} Nov 29 07:14:49 crc kubenswrapper[4943]: I1129 07:14:49.349095 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:14:49 crc kubenswrapper[4943]: E1129 07:14:49.349371 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:14:49 crc kubenswrapper[4943]: I1129 07:14:49.837155 4943 scope.go:117] "RemoveContainer" containerID="da186a175914ca6eb05a7638c8b22082f72526a28c543d11c2bb45498468f6b5" Nov 29 07:14:49 crc kubenswrapper[4943]: I1129 07:14:49.890783 4943 scope.go:117] "RemoveContainer" containerID="7c1e17d6e133478b720e92b2f1690ded1a4fa0f19ec406b9dfb3f9838e14345b" Nov 29 07:14:49 crc kubenswrapper[4943]: I1129 07:14:49.910317 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:50 crc kubenswrapper[4943]: I1129 07:14:50.183731 4943 generic.go:334] "Generic (PLEG): container finished" podID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" containerID="b768dd1600aaf9f8eb9d4d89decdb235a5a6c32f57113ea21a9285a483634c9b" exitCode=0 Nov 29 07:14:50 crc kubenswrapper[4943]: I1129 07:14:50.183852 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" event={"ID":"61e0b52f-bff7-4aaa-a0e3-309165bc0513","Type":"ContainerDied","Data":"b768dd1600aaf9f8eb9d4d89decdb235a5a6c32f57113ea21a9285a483634c9b"} Nov 29 07:14:50 crc kubenswrapper[4943]: I1129 07:14:50.506914 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 29 07:14:50 crc kubenswrapper[4943]: I1129 07:14:50.607676 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:50 crc kubenswrapper[4943]: I1129 07:14:50.707902 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:51 crc kubenswrapper[4943]: I1129 07:14:51.195658 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" event={"ID":"61e0b52f-bff7-4aaa-a0e3-309165bc0513","Type":"ContainerStarted","Data":"22c8c91a6c9d2f661d6e79990c109a6792384289ec91138cb365bb54195de1f4"} Nov 29 07:14:51 crc kubenswrapper[4943]: I1129 07:14:51.197113 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:51 crc kubenswrapper[4943]: I1129 07:14:51.200973 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerStarted","Data":"4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224"} Nov 29 07:14:51 crc kubenswrapper[4943]: I1129 07:14:51.201101 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-log" containerID="cri-o://f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad" gracePeriod=30 Nov 29 07:14:51 crc kubenswrapper[4943]: I1129 07:14:51.201147 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-api" containerID="cri-o://4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434" gracePeriod=30 Nov 29 07:14:51 crc kubenswrapper[4943]: I1129 07:14:51.217793 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" podStartSLOduration=3.217776344 podStartE2EDuration="3.217776344s" podCreationTimestamp="2025-11-29 07:14:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:51.211342956 +0000 UTC m=+2466.141431719" watchObservedRunningTime="2025-11-29 07:14:51.217776344 +0000 UTC m=+2466.147865097" Nov 29 07:14:52 crc kubenswrapper[4943]: I1129 07:14:52.212053 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerStarted","Data":"05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe"} Nov 29 07:14:52 crc kubenswrapper[4943]: I1129 07:14:52.212388 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:14:52 crc kubenswrapper[4943]: I1129 07:14:52.212326 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="sg-core" containerID="cri-o://4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224" gracePeriod=30 Nov 29 07:14:52 crc kubenswrapper[4943]: I1129 07:14:52.212330 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="proxy-httpd" containerID="cri-o://05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe" gracePeriod=30 Nov 29 07:14:52 crc kubenswrapper[4943]: I1129 07:14:52.212352 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="ceilometer-notification-agent" containerID="cri-o://21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c" gracePeriod=30 Nov 29 07:14:52 crc kubenswrapper[4943]: I1129 07:14:52.212287 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="ceilometer-central-agent" containerID="cri-o://988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb" gracePeriod=30 Nov 29 07:14:52 crc kubenswrapper[4943]: I1129 07:14:52.221423 4943 generic.go:334] "Generic 
(PLEG): container finished" podID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerID="f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad" exitCode=143 Nov 29 07:14:52 crc kubenswrapper[4943]: I1129 07:14:52.221456 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a96f0d9-9a50-4657-9564-fefc1d97f758","Type":"ContainerDied","Data":"f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad"} Nov 29 07:14:53 crc kubenswrapper[4943]: I1129 07:14:53.234207 4943 generic.go:334] "Generic (PLEG): container finished" podID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerID="05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe" exitCode=0 Nov 29 07:14:53 crc kubenswrapper[4943]: I1129 07:14:53.234524 4943 generic.go:334] "Generic (PLEG): container finished" podID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerID="4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224" exitCode=2 Nov 29 07:14:53 crc kubenswrapper[4943]: I1129 07:14:53.234536 4943 generic.go:334] "Generic (PLEG): container finished" podID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerID="21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c" exitCode=0 Nov 29 07:14:53 crc kubenswrapper[4943]: I1129 07:14:53.234303 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerDied","Data":"05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe"} Nov 29 07:14:53 crc kubenswrapper[4943]: I1129 07:14:53.234610 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerDied","Data":"4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224"} Nov 29 07:14:53 crc kubenswrapper[4943]: I1129 07:14:53.234627 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerDied","Data":"21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c"} Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.035870 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187017 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-ceilometer-tls-certs\") pod \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187173 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-run-httpd\") pod \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187209 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-scripts\") pod \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187239 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-log-httpd\") pod \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187267 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-sg-core-conf-yaml\") pod \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187381 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-combined-ca-bundle\") pod \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187407 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9kd8\" (UniqueName: \"kubernetes.io/projected/a166d56d-c74c-43a6-a1e4-e955f615f6b6-kube-api-access-t9kd8\") pod \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187428 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-config-data\") pod \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\" (UID: \"a166d56d-c74c-43a6-a1e4-e955f615f6b6\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187825 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a166d56d-c74c-43a6-a1e4-e955f615f6b6" (UID: "a166d56d-c74c-43a6-a1e4-e955f615f6b6"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.187945 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a166d56d-c74c-43a6-a1e4-e955f615f6b6" (UID: "a166d56d-c74c-43a6-a1e4-e955f615f6b6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.193877 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a166d56d-c74c-43a6-a1e4-e955f615f6b6-kube-api-access-t9kd8" (OuterVolumeSpecName: "kube-api-access-t9kd8") pod "a166d56d-c74c-43a6-a1e4-e955f615f6b6" (UID: "a166d56d-c74c-43a6-a1e4-e955f615f6b6"). InnerVolumeSpecName "kube-api-access-t9kd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.194875 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-scripts" (OuterVolumeSpecName: "scripts") pod "a166d56d-c74c-43a6-a1e4-e955f615f6b6" (UID: "a166d56d-c74c-43a6-a1e4-e955f615f6b6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.216267 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a166d56d-c74c-43a6-a1e4-e955f615f6b6" (UID: "a166d56d-c74c-43a6-a1e4-e955f615f6b6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.247248 4943 generic.go:334] "Generic (PLEG): container finished" podID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerID="988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb" exitCode=0 Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.247294 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerDied","Data":"988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb"} Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.247321 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a166d56d-c74c-43a6-a1e4-e955f615f6b6","Type":"ContainerDied","Data":"1ddf45628f90f1ca0f4e5d5816944c33ac0b778298168d38a4ba3bac52205cf3"} Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.247320 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.247354 4943 scope.go:117] "RemoveContainer" containerID="05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.253813 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "a166d56d-c74c-43a6-a1e4-e955f615f6b6" (UID: "a166d56d-c74c-43a6-a1e4-e955f615f6b6"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.265378 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a166d56d-c74c-43a6-a1e4-e955f615f6b6" (UID: "a166d56d-c74c-43a6-a1e4-e955f615f6b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.285238 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-config-data" (OuterVolumeSpecName: "config-data") pod "a166d56d-c74c-43a6-a1e4-e955f615f6b6" (UID: "a166d56d-c74c-43a6-a1e4-e955f615f6b6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.289392 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.289415 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.289427 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9kd8\" (UniqueName: \"kubernetes.io/projected/a166d56d-c74c-43a6-a1e4-e955f615f6b6-kube-api-access-t9kd8\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.289438 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.289447 4943 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.289456 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.289463 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a166d56d-c74c-43a6-a1e4-e955f615f6b6-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.289472 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a166d56d-c74c-43a6-a1e4-e955f615f6b6-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.304076 4943 scope.go:117] "RemoveContainer" containerID="4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.321767 4943 scope.go:117] "RemoveContainer" containerID="21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.343963 4943 scope.go:117] "RemoveContainer" 
containerID="988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.395096 4943 scope.go:117] "RemoveContainer" containerID="05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe" Nov 29 07:14:54 crc kubenswrapper[4943]: E1129 07:14:54.395622 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe\": container with ID starting with 05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe not found: ID does not exist" containerID="05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.395662 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe"} err="failed to get container status \"05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe\": rpc error: code = NotFound desc = could not find container \"05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe\": container with ID starting with 05511330ec7032222821b9d019e43c174c229e7adfff9d959cdc1b4b2807aefe not found: ID does not exist" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.395687 4943 scope.go:117] "RemoveContainer" containerID="4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224" Nov 29 07:14:54 crc kubenswrapper[4943]: E1129 07:14:54.395994 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224\": container with ID starting with 4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224 not found: ID does not exist" containerID="4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.396057 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224"} err="failed to get container status \"4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224\": rpc error: code = NotFound desc = could not find container \"4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224\": container with ID starting with 4670ae7ba314dddab244690cd4bbae8aed4696f48f4e019c932872fd6db02224 not found: ID does not exist" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.396081 4943 scope.go:117] "RemoveContainer" containerID="21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c" Nov 29 07:14:54 crc kubenswrapper[4943]: E1129 07:14:54.396429 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c\": container with ID starting with 21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c not found: ID does not exist" containerID="21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.396454 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c"} err="failed to get container status \"21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c\": rpc error: code = 
NotFound desc = could not find container \"21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c\": container with ID starting with 21c4bcca52472715f5f5dcce67007a98cc4124b296983cad74cedfbcbf13822c not found: ID does not exist" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.396472 4943 scope.go:117] "RemoveContainer" containerID="988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb" Nov 29 07:14:54 crc kubenswrapper[4943]: E1129 07:14:54.396793 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb\": container with ID starting with 988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb not found: ID does not exist" containerID="988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.396822 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb"} err="failed to get container status \"988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb\": rpc error: code = NotFound desc = could not find container \"988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb\": container with ID starting with 988f682c7d41a51d0eca791fefb82bc457dfdd44b01dff49672e4b725726b7eb not found: ID does not exist" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.590433 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.604693 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.611546 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:54 crc kubenswrapper[4943]: E1129 07:14:54.611895 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="ceilometer-notification-agent" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.611913 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="ceilometer-notification-agent" Nov 29 07:14:54 crc kubenswrapper[4943]: E1129 07:14:54.611941 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="sg-core" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.611948 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="sg-core" Nov 29 07:14:54 crc kubenswrapper[4943]: E1129 07:14:54.611957 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="ceilometer-central-agent" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.611964 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="ceilometer-central-agent" Nov 29 07:14:54 crc kubenswrapper[4943]: E1129 07:14:54.611972 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="proxy-httpd" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.611978 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="proxy-httpd" Nov 29 07:14:54 crc 
kubenswrapper[4943]: I1129 07:14:54.612137 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="sg-core" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.612158 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="ceilometer-notification-agent" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.612167 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="proxy-httpd" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.612179 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" containerName="ceilometer-central-agent" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.616216 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.619647 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.619872 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.622187 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.623958 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.667182 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.798539 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a96f0d9-9a50-4657-9564-fefc1d97f758-logs\") pod \"0a96f0d9-9a50-4657-9564-fefc1d97f758\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.798631 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-config-data\") pod \"0a96f0d9-9a50-4657-9564-fefc1d97f758\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.798679 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-combined-ca-bundle\") pod \"0a96f0d9-9a50-4657-9564-fefc1d97f758\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.798809 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lm2lj\" (UniqueName: \"kubernetes.io/projected/0a96f0d9-9a50-4657-9564-fefc1d97f758-kube-api-access-lm2lj\") pod \"0a96f0d9-9a50-4657-9564-fefc1d97f758\" (UID: \"0a96f0d9-9a50-4657-9564-fefc1d97f758\") " Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799029 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a96f0d9-9a50-4657-9564-fefc1d97f758-logs" (OuterVolumeSpecName: "logs") pod "0a96f0d9-9a50-4657-9564-fefc1d97f758" (UID: "0a96f0d9-9a50-4657-9564-fefc1d97f758"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799058 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799098 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-scripts\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799181 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-log-httpd\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799229 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799319 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-config-data\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799354 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799451 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-run-httpd\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799485 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c2m2\" (UniqueName: \"kubernetes.io/projected/6314bf60-e362-40ed-aee3-b35fd22cac1d-kube-api-access-2c2m2\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.799595 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a96f0d9-9a50-4657-9564-fefc1d97f758-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.805873 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a96f0d9-9a50-4657-9564-fefc1d97f758-kube-api-access-lm2lj" 
(OuterVolumeSpecName: "kube-api-access-lm2lj") pod "0a96f0d9-9a50-4657-9564-fefc1d97f758" (UID: "0a96f0d9-9a50-4657-9564-fefc1d97f758"). InnerVolumeSpecName "kube-api-access-lm2lj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.822151 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-config-data" (OuterVolumeSpecName: "config-data") pod "0a96f0d9-9a50-4657-9564-fefc1d97f758" (UID: "0a96f0d9-9a50-4657-9564-fefc1d97f758"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.830848 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a96f0d9-9a50-4657-9564-fefc1d97f758" (UID: "0a96f0d9-9a50-4657-9564-fefc1d97f758"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.901934 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.902033 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-config-data\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.902057 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.902113 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-run-httpd\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.902139 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c2m2\" (UniqueName: \"kubernetes.io/projected/6314bf60-e362-40ed-aee3-b35fd22cac1d-kube-api-access-2c2m2\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.902177 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.902198 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-scripts\") pod \"ceilometer-0\" (UID: 
\"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.902229 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-log-httpd\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.902721 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lm2lj\" (UniqueName: \"kubernetes.io/projected/0a96f0d9-9a50-4657-9564-fefc1d97f758-kube-api-access-lm2lj\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.903125 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-run-httpd\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.903247 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.903288 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a96f0d9-9a50-4657-9564-fefc1d97f758-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.903350 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-log-httpd\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.905390 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.906045 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-config-data\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.906350 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.908497 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-scripts\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.911207 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-ceilometer-tls-certs\") pod 
\"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.931309 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c2m2\" (UniqueName: \"kubernetes.io/projected/6314bf60-e362-40ed-aee3-b35fd22cac1d-kube-api-access-2c2m2\") pod \"ceilometer-0\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") " pod="openstack/ceilometer-0" Nov 29 07:14:54 crc kubenswrapper[4943]: I1129 07:14:54.943298 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.259518 4943 generic.go:334] "Generic (PLEG): container finished" podID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerID="4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434" exitCode=0 Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.259628 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.259619 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a96f0d9-9a50-4657-9564-fefc1d97f758","Type":"ContainerDied","Data":"4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434"} Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.260053 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0a96f0d9-9a50-4657-9564-fefc1d97f758","Type":"ContainerDied","Data":"959485fce1468942eb5b762e0adcd9075dfeb81b7b6ca327238efa32346715a8"} Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.260079 4943 scope.go:117] "RemoveContainer" containerID="4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.294510 4943 scope.go:117] "RemoveContainer" containerID="f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.310219 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.329768 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.330784 4943 scope.go:117] "RemoveContainer" containerID="4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434" Nov 29 07:14:55 crc kubenswrapper[4943]: E1129 07:14:55.331884 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434\": container with ID starting with 4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434 not found: ID does not exist" containerID="4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.331915 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434"} err="failed to get container status \"4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434\": rpc error: code = NotFound desc = could not find container \"4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434\": container with ID starting with 4e14c76def9bdb9bccbc879750ea9fbf112ef0b82fd4b7a92ed0089c562d1434 not found: ID does not exist" Nov 29 07:14:55 crc 
kubenswrapper[4943]: I1129 07:14:55.331939 4943 scope.go:117] "RemoveContainer" containerID="f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad" Nov 29 07:14:55 crc kubenswrapper[4943]: E1129 07:14:55.332671 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad\": container with ID starting with f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad not found: ID does not exist" containerID="f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.332696 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad"} err="failed to get container status \"f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad\": rpc error: code = NotFound desc = could not find container \"f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad\": container with ID starting with f429b3c5fa0b9d719376cc1741b718da549389d234b38d4f444e408095fff9ad not found: ID does not exist" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.355818 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" path="/var/lib/kubelet/pods/0a96f0d9-9a50-4657-9564-fefc1d97f758/volumes" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.356680 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a166d56d-c74c-43a6-a1e4-e955f615f6b6" path="/var/lib/kubelet/pods/a166d56d-c74c-43a6-a1e4-e955f615f6b6/volumes" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.357616 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:55 crc kubenswrapper[4943]: E1129 07:14:55.357917 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-log" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.357953 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-log" Nov 29 07:14:55 crc kubenswrapper[4943]: E1129 07:14:55.357974 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-api" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.357982 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-api" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.358141 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-api" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.358162 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a96f0d9-9a50-4657-9564-fefc1d97f758" containerName="nova-api-log" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.359722 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.359865 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.364949 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.365144 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.365260 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.412777 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.417400 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-logs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.417550 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48g74\" (UniqueName: \"kubernetes.io/projected/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-kube-api-access-48g74\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.417602 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-config-data\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.417659 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-public-tls-certs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.417683 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.418061 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.507121 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.519456 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48g74\" (UniqueName: \"kubernetes.io/projected/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-kube-api-access-48g74\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.519506 4943 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-config-data\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.519557 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-public-tls-certs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.519623 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.519677 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.519763 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-logs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.522828 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-logs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.527102 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.527888 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-config-data\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.528470 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-public-tls-certs\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.532976 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.536869 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48g74\" 
(UniqueName: \"kubernetes.io/projected/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-kube-api-access-48g74\") pod \"nova-api-0\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " pod="openstack/nova-api-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.538529 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.608336 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.627247 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:55 crc kubenswrapper[4943]: I1129 07:14:55.677551 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.109479 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:14:56 crc kubenswrapper[4943]: W1129 07:14:56.113238 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod301d3de7_b8b8_4d8e_90b9_0ca150d3cb7d.slice/crio-c8f29393d38a8edc26475f16ddd9f54f223223a834d7495de8435d043ad19203 WatchSource:0}: Error finding container c8f29393d38a8edc26475f16ddd9f54f223223a834d7495de8435d043ad19203: Status 404 returned error can't find the container with id c8f29393d38a8edc26475f16ddd9f54f223223a834d7495de8435d043ad19203 Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.273902 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerStarted","Data":"e8961f2e9da2808a5f9efded300af6c607dbddc8191de59bc7550b712bc6285f"} Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.275586 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d","Type":"ContainerStarted","Data":"c8f29393d38a8edc26475f16ddd9f54f223223a834d7495de8435d043ad19203"} Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.299031 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.311522 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.555328 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-7dwzg"] Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.557107 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.559349 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.559868 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.566529 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-7dwzg"] Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.645913 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5qpt\" (UniqueName: \"kubernetes.io/projected/de1c9351-d792-4a07-94d1-24b480b1ec3b-kube-api-access-p5qpt\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.645964 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.646082 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-scripts\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.646131 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-config-data\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.747511 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-scripts\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.747606 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-config-data\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.747669 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5qpt\" (UniqueName: \"kubernetes.io/projected/de1c9351-d792-4a07-94d1-24b480b1ec3b-kube-api-access-p5qpt\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.747693 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.751402 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.751811 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-scripts\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.761479 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-config-data\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.764082 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5qpt\" (UniqueName: \"kubernetes.io/projected/de1c9351-d792-4a07-94d1-24b480b1ec3b-kube-api-access-p5qpt\") pod \"nova-cell1-cell-mapping-7dwzg\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:56 crc kubenswrapper[4943]: I1129 07:14:56.915636 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:14:57 crc kubenswrapper[4943]: I1129 07:14:57.287790 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerStarted","Data":"b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6"} Nov 29 07:14:57 crc kubenswrapper[4943]: I1129 07:14:57.290391 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d","Type":"ContainerStarted","Data":"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38"} Nov 29 07:14:57 crc kubenswrapper[4943]: I1129 07:14:57.290434 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d","Type":"ContainerStarted","Data":"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032"} Nov 29 07:14:57 crc kubenswrapper[4943]: I1129 07:14:57.313167 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.313148963 podStartE2EDuration="2.313148963s" podCreationTimestamp="2025-11-29 07:14:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:57.310242962 +0000 UTC m=+2472.240331725" watchObservedRunningTime="2025-11-29 07:14:57.313148963 +0000 UTC m=+2472.243237716" Nov 29 07:14:57 crc kubenswrapper[4943]: I1129 07:14:57.412592 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-7dwzg"] Nov 29 07:14:57 crc kubenswrapper[4943]: W1129 07:14:57.412715 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde1c9351_d792_4a07_94d1_24b480b1ec3b.slice/crio-06f70a266f81a7a03fe6ddbcc40dc2c1645b8565761e6c23b2a2528f45e0fc5e WatchSource:0}: Error finding container 06f70a266f81a7a03fe6ddbcc40dc2c1645b8565761e6c23b2a2528f45e0fc5e: Status 404 returned error can't find the container with id 06f70a266f81a7a03fe6ddbcc40dc2c1645b8565761e6c23b2a2528f45e0fc5e Nov 29 07:14:58 crc kubenswrapper[4943]: I1129 07:14:58.302215 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerStarted","Data":"4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803"} Nov 29 07:14:58 crc kubenswrapper[4943]: I1129 07:14:58.302590 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerStarted","Data":"cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7"} Nov 29 07:14:58 crc kubenswrapper[4943]: I1129 07:14:58.305986 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-7dwzg" event={"ID":"de1c9351-d792-4a07-94d1-24b480b1ec3b","Type":"ContainerStarted","Data":"1ef5568db0c7bb39afa9411d4dc05a41a7727e31d83acfc551c5be03668d7e46"} Nov 29 07:14:58 crc kubenswrapper[4943]: I1129 07:14:58.306022 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-7dwzg" event={"ID":"de1c9351-d792-4a07-94d1-24b480b1ec3b","Type":"ContainerStarted","Data":"06f70a266f81a7a03fe6ddbcc40dc2c1645b8565761e6c23b2a2528f45e0fc5e"} Nov 29 07:14:58 crc kubenswrapper[4943]: I1129 07:14:58.321939 4943 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-7dwzg" podStartSLOduration=2.3219165999999998 podStartE2EDuration="2.3219166s" podCreationTimestamp="2025-11-29 07:14:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:14:58.319557871 +0000 UTC m=+2473.249646624" watchObservedRunningTime="2025-11-29 07:14:58.3219166 +0000 UTC m=+2473.252005353" Nov 29 07:14:58 crc kubenswrapper[4943]: I1129 07:14:58.683784 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" Nov 29 07:14:58 crc kubenswrapper[4943]: I1129 07:14:58.740192 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-7hs8x"] Nov 29 07:14:58 crc kubenswrapper[4943]: I1129 07:14:58.740426 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" podUID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" containerName="dnsmasq-dns" containerID="cri-o://902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7" gracePeriod=10 Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.277301 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.316797 4943 generic.go:334] "Generic (PLEG): container finished" podID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" containerID="902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7" exitCode=0 Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.317839 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.318313 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" event={"ID":"4c770ec3-c23e-4afe-accb-3ad3b4aded53","Type":"ContainerDied","Data":"902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7"} Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.318343 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-7hs8x" event={"ID":"4c770ec3-c23e-4afe-accb-3ad3b4aded53","Type":"ContainerDied","Data":"829707d599fad6fa41964fda9a606a0c49d100b35387868de116a6c9974c0a75"} Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.318360 4943 scope.go:117] "RemoveContainer" containerID="902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.345975 4943 scope.go:117] "RemoveContainer" containerID="9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.389992 4943 scope.go:117] "RemoveContainer" containerID="902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7" Nov 29 07:14:59 crc kubenswrapper[4943]: E1129 07:14:59.390948 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7\": container with ID starting with 902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7 not found: ID does not exist" containerID="902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.390979 4943 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7"} err="failed to get container status \"902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7\": rpc error: code = NotFound desc = could not find container \"902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7\": container with ID starting with 902e1eddbedee32fb97290b159f5b8eab93f1043ea5b271114e10bc4899a2bf7 not found: ID does not exist" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.391001 4943 scope.go:117] "RemoveContainer" containerID="9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c" Nov 29 07:14:59 crc kubenswrapper[4943]: E1129 07:14:59.391298 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c\": container with ID starting with 9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c not found: ID does not exist" containerID="9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.391352 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c"} err="failed to get container status \"9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c\": rpc error: code = NotFound desc = could not find container \"9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c\": container with ID starting with 9265288d093d7d02be69036d1102ef96afe8bc6b98199b1360588703a4e8ae9c not found: ID does not exist" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.403973 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-nb\") pod \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.404070 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-sb\") pod \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.404144 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjtlf\" (UniqueName: \"kubernetes.io/projected/4c770ec3-c23e-4afe-accb-3ad3b4aded53-kube-api-access-zjtlf\") pod \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.404179 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-dns-svc\") pod \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.404253 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-config\") pod \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\" (UID: \"4c770ec3-c23e-4afe-accb-3ad3b4aded53\") " Nov 29 07:14:59 crc 
kubenswrapper[4943]: I1129 07:14:59.412650 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c770ec3-c23e-4afe-accb-3ad3b4aded53-kube-api-access-zjtlf" (OuterVolumeSpecName: "kube-api-access-zjtlf") pod "4c770ec3-c23e-4afe-accb-3ad3b4aded53" (UID: "4c770ec3-c23e-4afe-accb-3ad3b4aded53"). InnerVolumeSpecName "kube-api-access-zjtlf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.455409 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4c770ec3-c23e-4afe-accb-3ad3b4aded53" (UID: "4c770ec3-c23e-4afe-accb-3ad3b4aded53"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.488501 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4c770ec3-c23e-4afe-accb-3ad3b4aded53" (UID: "4c770ec3-c23e-4afe-accb-3ad3b4aded53"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.496896 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-config" (OuterVolumeSpecName: "config") pod "4c770ec3-c23e-4afe-accb-3ad3b4aded53" (UID: "4c770ec3-c23e-4afe-accb-3ad3b4aded53"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.505195 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4c770ec3-c23e-4afe-accb-3ad3b4aded53" (UID: "4c770ec3-c23e-4afe-accb-3ad3b4aded53"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.507111 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.507144 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.507155 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjtlf\" (UniqueName: \"kubernetes.io/projected/4c770ec3-c23e-4afe-accb-3ad3b4aded53-kube-api-access-zjtlf\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.507187 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.507205 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c770ec3-c23e-4afe-accb-3ad3b4aded53-config\") on node \"crc\" DevicePath \"\"" Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.653744 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-7hs8x"] Nov 29 07:14:59 crc kubenswrapper[4943]: I1129 07:14:59.661706 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-7hs8x"] Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.138199 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm"] Nov 29 07:15:00 crc kubenswrapper[4943]: E1129 07:15:00.138708 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" containerName="dnsmasq-dns" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.138725 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" containerName="dnsmasq-dns" Nov 29 07:15:00 crc kubenswrapper[4943]: E1129 07:15:00.138754 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" containerName="init" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.138763 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" containerName="init" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.138963 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" containerName="dnsmasq-dns" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.139756 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.142853 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.143406 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.148665 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm"] Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.221312 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b482498-960e-427f-a487-36821bcf511e-config-volume\") pod \"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.221391 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b482498-960e-427f-a487-36821bcf511e-secret-volume\") pod \"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.221428 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8s5b\" (UniqueName: \"kubernetes.io/projected/1b482498-960e-427f-a487-36821bcf511e-kube-api-access-b8s5b\") pod \"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.322729 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b482498-960e-427f-a487-36821bcf511e-secret-volume\") pod \"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.322771 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8s5b\" (UniqueName: \"kubernetes.io/projected/1b482498-960e-427f-a487-36821bcf511e-kube-api-access-b8s5b\") pod \"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.322883 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b482498-960e-427f-a487-36821bcf511e-config-volume\") pod \"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.323763 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b482498-960e-427f-a487-36821bcf511e-config-volume\") pod 
\"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.337677 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b482498-960e-427f-a487-36821bcf511e-secret-volume\") pod \"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.341185 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8s5b\" (UniqueName: \"kubernetes.io/projected/1b482498-960e-427f-a487-36821bcf511e-kube-api-access-b8s5b\") pod \"collect-profiles-29406675-flbnm\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.342443 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerStarted","Data":"d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16"} Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.342939 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:15:00 crc kubenswrapper[4943]: I1129 07:15:00.458968 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:01 crc kubenswrapper[4943]: I1129 07:15:01.289721 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.299408034 podStartE2EDuration="7.289699838s" podCreationTimestamp="2025-11-29 07:14:54 +0000 UTC" firstStartedPulling="2025-11-29 07:14:55.412197985 +0000 UTC m=+2470.342286738" lastFinishedPulling="2025-11-29 07:14:59.402489789 +0000 UTC m=+2474.332578542" observedRunningTime="2025-11-29 07:15:00.93189614 +0000 UTC m=+2475.861984893" watchObservedRunningTime="2025-11-29 07:15:01.289699838 +0000 UTC m=+2476.219788591" Nov 29 07:15:01 crc kubenswrapper[4943]: I1129 07:15:01.293132 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm"] Nov 29 07:15:01 crc kubenswrapper[4943]: I1129 07:15:01.328296 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:15:01 crc kubenswrapper[4943]: E1129 07:15:01.328597 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:15:01 crc kubenswrapper[4943]: I1129 07:15:01.342193 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c770ec3-c23e-4afe-accb-3ad3b4aded53" path="/var/lib/kubelet/pods/4c770ec3-c23e-4afe-accb-3ad3b4aded53/volumes" Nov 29 07:15:01 crc kubenswrapper[4943]: I1129 07:15:01.354348 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" event={"ID":"1b482498-960e-427f-a487-36821bcf511e","Type":"ContainerStarted","Data":"9ceee632a7acbdbe75fdd478a701df949e3dc1c8064c9009a1855ae0a2cda24d"} Nov 29 07:15:02 crc kubenswrapper[4943]: I1129 07:15:02.363119 4943 generic.go:334] "Generic (PLEG): container finished" podID="1b482498-960e-427f-a487-36821bcf511e" containerID="2b49d16071ce569ba7ca1d69b6d59d1a6ff1c6da192906abda0037f88e8d7115" exitCode=0 Nov 29 07:15:02 crc kubenswrapper[4943]: I1129 07:15:02.363272 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" event={"ID":"1b482498-960e-427f-a487-36821bcf511e","Type":"ContainerDied","Data":"2b49d16071ce569ba7ca1d69b6d59d1a6ff1c6da192906abda0037f88e8d7115"} Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.376733 4943 generic.go:334] "Generic (PLEG): container finished" podID="de1c9351-d792-4a07-94d1-24b480b1ec3b" containerID="1ef5568db0c7bb39afa9411d4dc05a41a7727e31d83acfc551c5be03668d7e46" exitCode=0 Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.376887 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-7dwzg" event={"ID":"de1c9351-d792-4a07-94d1-24b480b1ec3b","Type":"ContainerDied","Data":"1ef5568db0c7bb39afa9411d4dc05a41a7727e31d83acfc551c5be03668d7e46"} Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.750481 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.838409 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b482498-960e-427f-a487-36821bcf511e-config-volume\") pod \"1b482498-960e-427f-a487-36821bcf511e\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.838670 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8s5b\" (UniqueName: \"kubernetes.io/projected/1b482498-960e-427f-a487-36821bcf511e-kube-api-access-b8s5b\") pod \"1b482498-960e-427f-a487-36821bcf511e\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.838761 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b482498-960e-427f-a487-36821bcf511e-secret-volume\") pod \"1b482498-960e-427f-a487-36821bcf511e\" (UID: \"1b482498-960e-427f-a487-36821bcf511e\") " Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.840083 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b482498-960e-427f-a487-36821bcf511e-config-volume" (OuterVolumeSpecName: "config-volume") pod "1b482498-960e-427f-a487-36821bcf511e" (UID: "1b482498-960e-427f-a487-36821bcf511e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.848407 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b482498-960e-427f-a487-36821bcf511e-kube-api-access-b8s5b" (OuterVolumeSpecName: "kube-api-access-b8s5b") pod "1b482498-960e-427f-a487-36821bcf511e" (UID: "1b482498-960e-427f-a487-36821bcf511e"). InnerVolumeSpecName "kube-api-access-b8s5b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.848401 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b482498-960e-427f-a487-36821bcf511e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1b482498-960e-427f-a487-36821bcf511e" (UID: "1b482498-960e-427f-a487-36821bcf511e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.940670 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8s5b\" (UniqueName: \"kubernetes.io/projected/1b482498-960e-427f-a487-36821bcf511e-kube-api-access-b8s5b\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.940718 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b482498-960e-427f-a487-36821bcf511e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:03 crc kubenswrapper[4943]: I1129 07:15:03.940728 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b482498-960e-427f-a487-36821bcf511e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.388231 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.391637 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm" event={"ID":"1b482498-960e-427f-a487-36821bcf511e","Type":"ContainerDied","Data":"9ceee632a7acbdbe75fdd478a701df949e3dc1c8064c9009a1855ae0a2cda24d"} Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.391812 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ceee632a7acbdbe75fdd478a701df949e3dc1c8064c9009a1855ae0a2cda24d" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.718008 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.831149 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"] Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.839345 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406630-5fztj"] Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.856558 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-combined-ca-bundle\") pod \"de1c9351-d792-4a07-94d1-24b480b1ec3b\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.856671 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5qpt\" (UniqueName: \"kubernetes.io/projected/de1c9351-d792-4a07-94d1-24b480b1ec3b-kube-api-access-p5qpt\") pod \"de1c9351-d792-4a07-94d1-24b480b1ec3b\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.856753 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-config-data\") pod \"de1c9351-d792-4a07-94d1-24b480b1ec3b\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.856824 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-scripts\") pod \"de1c9351-d792-4a07-94d1-24b480b1ec3b\" (UID: \"de1c9351-d792-4a07-94d1-24b480b1ec3b\") " Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.862797 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-scripts" (OuterVolumeSpecName: "scripts") pod "de1c9351-d792-4a07-94d1-24b480b1ec3b" (UID: "de1c9351-d792-4a07-94d1-24b480b1ec3b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.862999 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de1c9351-d792-4a07-94d1-24b480b1ec3b-kube-api-access-p5qpt" (OuterVolumeSpecName: "kube-api-access-p5qpt") pod "de1c9351-d792-4a07-94d1-24b480b1ec3b" (UID: "de1c9351-d792-4a07-94d1-24b480b1ec3b"). InnerVolumeSpecName "kube-api-access-p5qpt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.888936 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-config-data" (OuterVolumeSpecName: "config-data") pod "de1c9351-d792-4a07-94d1-24b480b1ec3b" (UID: "de1c9351-d792-4a07-94d1-24b480b1ec3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.890225 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de1c9351-d792-4a07-94d1-24b480b1ec3b" (UID: "de1c9351-d792-4a07-94d1-24b480b1ec3b"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.959092 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.959127 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5qpt\" (UniqueName: \"kubernetes.io/projected/de1c9351-d792-4a07-94d1-24b480b1ec3b-kube-api-access-p5qpt\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.959142 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:04 crc kubenswrapper[4943]: I1129 07:15:04.959153 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de1c9351-d792-4a07-94d1-24b480b1ec3b-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.343095 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4af90184-bb3b-455d-a9dc-9e120c08b3c7" path="/var/lib/kubelet/pods/4af90184-bb3b-455d-a9dc-9e120c08b3c7/volumes" Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.398453 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-7dwzg" event={"ID":"de1c9351-d792-4a07-94d1-24b480b1ec3b","Type":"ContainerDied","Data":"06f70a266f81a7a03fe6ddbcc40dc2c1645b8565761e6c23b2a2528f45e0fc5e"} Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.398501 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06f70a266f81a7a03fe6ddbcc40dc2c1645b8565761e6c23b2a2528f45e0fc5e" Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.398509 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-7dwzg" Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.585403 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.585736 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerName="nova-api-log" containerID="cri-o://1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032" gracePeriod=30 Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.585814 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerName="nova-api-api" containerID="cri-o://2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38" gracePeriod=30 Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.602262 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.602501 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="018a61fe-e314-4c8f-b99b-df417631b935" containerName="nova-scheduler-scheduler" containerID="cri-o://f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149" gracePeriod=30 Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.614018 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.614344 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-metadata" containerID="cri-o://0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9" gracePeriod=30 Nov 29 07:15:05 crc kubenswrapper[4943]: I1129 07:15:05.614267 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-log" containerID="cri-o://48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f" gracePeriod=30 Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.100971 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.180189 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-config-data\") pod \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.180276 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-internal-tls-certs\") pod \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.180341 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48g74\" (UniqueName: \"kubernetes.io/projected/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-kube-api-access-48g74\") pod \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.180366 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-public-tls-certs\") pod \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.180434 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-combined-ca-bundle\") pod \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.180529 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-logs\") pod \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\" (UID: \"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d\") " Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.180859 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-logs" (OuterVolumeSpecName: "logs") pod "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" (UID: "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.181014 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.184580 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-kube-api-access-48g74" (OuterVolumeSpecName: "kube-api-access-48g74") pod "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" (UID: "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d"). InnerVolumeSpecName "kube-api-access-48g74". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.204760 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" (UID: "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.205764 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-config-data" (OuterVolumeSpecName: "config-data") pod "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" (UID: "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.233551 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" (UID: "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.235267 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" (UID: "301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.282779 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.282817 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.282830 4943 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.282842 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48g74\" (UniqueName: \"kubernetes.io/projected/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-kube-api-access-48g74\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.282854 4943 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.409135 4943 generic.go:334] "Generic (PLEG): container finished" podID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerID="2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38" exitCode=0 Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.409169 4943 generic.go:334] "Generic (PLEG): container finished" 
podID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerID="1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032" exitCode=143 Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.409232 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d","Type":"ContainerDied","Data":"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38"} Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.409250 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.409273 4943 scope.go:117] "RemoveContainer" containerID="2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.409261 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d","Type":"ContainerDied","Data":"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032"} Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.409477 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d","Type":"ContainerDied","Data":"c8f29393d38a8edc26475f16ddd9f54f223223a834d7495de8435d043ad19203"} Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.413960 4943 generic.go:334] "Generic (PLEG): container finished" podID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerID="48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f" exitCode=143 Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.414008 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d302191c-c05e-41a3-b6f0-e8e1e8358a94","Type":"ContainerDied","Data":"48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f"} Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.443837 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.444129 4943 scope.go:117] "RemoveContainer" containerID="1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.454482 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.466972 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 07:15:06 crc kubenswrapper[4943]: E1129 07:15:06.467397 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de1c9351-d792-4a07-94d1-24b480b1ec3b" containerName="nova-manage" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.467426 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="de1c9351-d792-4a07-94d1-24b480b1ec3b" containerName="nova-manage" Nov 29 07:15:06 crc kubenswrapper[4943]: E1129 07:15:06.467448 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerName="nova-api-api" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.467456 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerName="nova-api-api" Nov 29 07:15:06 crc kubenswrapper[4943]: E1129 07:15:06.467475 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b482498-960e-427f-a487-36821bcf511e" containerName="collect-profiles" Nov 29 07:15:06 crc 
kubenswrapper[4943]: I1129 07:15:06.467482 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b482498-960e-427f-a487-36821bcf511e" containerName="collect-profiles" Nov 29 07:15:06 crc kubenswrapper[4943]: E1129 07:15:06.467514 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerName="nova-api-log" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.467524 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerName="nova-api-log" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.467825 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="de1c9351-d792-4a07-94d1-24b480b1ec3b" containerName="nova-manage" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.467850 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerName="nova-api-api" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.467862 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" containerName="nova-api-log" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.467873 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b482498-960e-427f-a487-36821bcf511e" containerName="collect-profiles" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.469106 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.472598 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.472654 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.473802 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.478198 4943 scope.go:117] "RemoveContainer" containerID="2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38" Nov 29 07:15:06 crc kubenswrapper[4943]: E1129 07:15:06.478842 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38\": container with ID starting with 2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38 not found: ID does not exist" containerID="2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.478882 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38"} err="failed to get container status \"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38\": rpc error: code = NotFound desc = could not find container \"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38\": container with ID starting with 2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38 not found: ID does not exist" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.478928 4943 scope.go:117] "RemoveContainer" containerID="1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032" Nov 29 07:15:06 crc kubenswrapper[4943]: E1129 07:15:06.479312 4943 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032\": container with ID starting with 1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032 not found: ID does not exist" containerID="1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.479357 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032"} err="failed to get container status \"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032\": rpc error: code = NotFound desc = could not find container \"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032\": container with ID starting with 1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032 not found: ID does not exist" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.479385 4943 scope.go:117] "RemoveContainer" containerID="2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.479723 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38"} err="failed to get container status \"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38\": rpc error: code = NotFound desc = could not find container \"2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38\": container with ID starting with 2a6b2c45eb0384816cd76b9e131f6ad7136d6c276676648b3bc493e1a225cc38 not found: ID does not exist" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.479763 4943 scope.go:117] "RemoveContainer" containerID="1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.480083 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032"} err="failed to get container status \"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032\": rpc error: code = NotFound desc = could not find container \"1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032\": container with ID starting with 1261477dad28b7ab56b48380e8f495dca3a2edd0213314d26291fdcf22290032 not found: ID does not exist" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.485967 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.588520 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7qn5\" (UniqueName: \"kubernetes.io/projected/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-kube-api-access-r7qn5\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.588705 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-config-data\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.588734 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.588826 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.588889 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-public-tls-certs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.588912 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-logs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.689978 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7qn5\" (UniqueName: \"kubernetes.io/projected/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-kube-api-access-r7qn5\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.690052 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-config-data\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.690075 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.690113 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.690143 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-public-tls-certs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.690161 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-logs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.690729 4943 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-logs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.694531 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.695384 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-public-tls-certs\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.698310 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.699039 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-config-data\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.709065 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7qn5\" (UniqueName: \"kubernetes.io/projected/7a73d989-5ec4-4b31-a47b-7b9bcda756cf-kube-api-access-r7qn5\") pod \"nova-api-0\" (UID: \"7a73d989-5ec4-4b31-a47b-7b9bcda756cf\") " pod="openstack/nova-api-0" Nov 29 07:15:06 crc kubenswrapper[4943]: I1129 07:15:06.788513 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 07:15:07 crc kubenswrapper[4943]: W1129 07:15:07.221689 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a73d989_5ec4_4b31_a47b_7b9bcda756cf.slice/crio-d429960b7e0065e01c9f54b9d566352d97674c04c2aaf0eaf36f419e5d10e789 WatchSource:0}: Error finding container d429960b7e0065e01c9f54b9d566352d97674c04c2aaf0eaf36f419e5d10e789: Status 404 returned error can't find the container with id d429960b7e0065e01c9f54b9d566352d97674c04c2aaf0eaf36f419e5d10e789 Nov 29 07:15:07 crc kubenswrapper[4943]: I1129 07:15:07.221868 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 07:15:07 crc kubenswrapper[4943]: I1129 07:15:07.338048 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d" path="/var/lib/kubelet/pods/301d3de7-b8b8-4d8e-90b9-0ca150d3cb7d/volumes" Nov 29 07:15:07 crc kubenswrapper[4943]: I1129 07:15:07.428677 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7a73d989-5ec4-4b31-a47b-7b9bcda756cf","Type":"ContainerStarted","Data":"78d7be4ed3cfc147b5966b54ac57ddbee130d42fb116b187d35091d82e2eeb57"} Nov 29 07:15:07 crc kubenswrapper[4943]: I1129 07:15:07.428719 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7a73d989-5ec4-4b31-a47b-7b9bcda756cf","Type":"ContainerStarted","Data":"d429960b7e0065e01c9f54b9d566352d97674c04c2aaf0eaf36f419e5d10e789"} Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.051692 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.114326 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkxjx\" (UniqueName: \"kubernetes.io/projected/018a61fe-e314-4c8f-b99b-df417631b935-kube-api-access-rkxjx\") pod \"018a61fe-e314-4c8f-b99b-df417631b935\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.114406 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-config-data\") pod \"018a61fe-e314-4c8f-b99b-df417631b935\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.114496 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-combined-ca-bundle\") pod \"018a61fe-e314-4c8f-b99b-df417631b935\" (UID: \"018a61fe-e314-4c8f-b99b-df417631b935\") " Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.119573 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/018a61fe-e314-4c8f-b99b-df417631b935-kube-api-access-rkxjx" (OuterVolumeSpecName: "kube-api-access-rkxjx") pod "018a61fe-e314-4c8f-b99b-df417631b935" (UID: "018a61fe-e314-4c8f-b99b-df417631b935"). InnerVolumeSpecName "kube-api-access-rkxjx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.140951 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-config-data" (OuterVolumeSpecName: "config-data") pod "018a61fe-e314-4c8f-b99b-df417631b935" (UID: "018a61fe-e314-4c8f-b99b-df417631b935"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.143186 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "018a61fe-e314-4c8f-b99b-df417631b935" (UID: "018a61fe-e314-4c8f-b99b-df417631b935"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.217025 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkxjx\" (UniqueName: \"kubernetes.io/projected/018a61fe-e314-4c8f-b99b-df417631b935-kube-api-access-rkxjx\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.217061 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.217072 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018a61fe-e314-4c8f-b99b-df417631b935-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.438244 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7a73d989-5ec4-4b31-a47b-7b9bcda756cf","Type":"ContainerStarted","Data":"bbcfd48492d7dcc213bc91a4f86ebaa61aa7ea607a281d709b058c7256367979"} Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.440101 4943 generic.go:334] "Generic (PLEG): container finished" podID="018a61fe-e314-4c8f-b99b-df417631b935" containerID="f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149" exitCode=0 Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.440156 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"018a61fe-e314-4c8f-b99b-df417631b935","Type":"ContainerDied","Data":"f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149"} Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.440160 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.440180 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"018a61fe-e314-4c8f-b99b-df417631b935","Type":"ContainerDied","Data":"dbdf1f66eddb389590aba09842bce5f3c00942d34535c1b4fcba236f4c685a93"} Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.440204 4943 scope.go:117] "RemoveContainer" containerID="f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.459525 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.459507753 podStartE2EDuration="2.459507753s" podCreationTimestamp="2025-11-29 07:15:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:15:08.456531639 +0000 UTC m=+2483.386620412" watchObservedRunningTime="2025-11-29 07:15:08.459507753 +0000 UTC m=+2483.389596506" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.465619 4943 scope.go:117] "RemoveContainer" containerID="f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149" Nov 29 07:15:08 crc kubenswrapper[4943]: E1129 07:15:08.468403 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149\": container with ID starting with f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149 not found: ID does not exist" containerID="f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.468468 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149"} err="failed to get container status \"f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149\": rpc error: code = NotFound desc = could not find container \"f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149\": container with ID starting with f18ef80b253fa129b6015e5087af0ce5f94c517e6e85041da63474eeb8aca149 not found: ID does not exist" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.481204 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.494676 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.506579 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:15:08 crc kubenswrapper[4943]: E1129 07:15:08.506972 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018a61fe-e314-4c8f-b99b-df417631b935" containerName="nova-scheduler-scheduler" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.507002 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="018a61fe-e314-4c8f-b99b-df417631b935" containerName="nova-scheduler-scheduler" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.507223 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="018a61fe-e314-4c8f-b99b-df417631b935" containerName="nova-scheduler-scheduler" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.507901 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.516170 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.518068 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.623267 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c35e524d-9a33-483b-a6d8-98a7ace4b632-config-data\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.623325 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvg45\" (UniqueName: \"kubernetes.io/projected/c35e524d-9a33-483b-a6d8-98a7ace4b632-kube-api-access-xvg45\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.623428 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c35e524d-9a33-483b-a6d8-98a7ace4b632-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.725449 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c35e524d-9a33-483b-a6d8-98a7ace4b632-config-data\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.725523 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvg45\" (UniqueName: \"kubernetes.io/projected/c35e524d-9a33-483b-a6d8-98a7ace4b632-kube-api-access-xvg45\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.725620 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c35e524d-9a33-483b-a6d8-98a7ace4b632-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.730698 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c35e524d-9a33-483b-a6d8-98a7ace4b632-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.730656 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c35e524d-9a33-483b-a6d8-98a7ace4b632-config-data\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.742842 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvg45\" (UniqueName: 
\"kubernetes.io/projected/c35e524d-9a33-483b-a6d8-98a7ace4b632-kube-api-access-xvg45\") pod \"nova-scheduler-0\" (UID: \"c35e524d-9a33-483b-a6d8-98a7ace4b632\") " pod="openstack/nova-scheduler-0" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.749027 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": read tcp 10.217.0.2:42192->10.217.0.188:8775: read: connection reset by peer" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.749309 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.188:8775/\": read tcp 10.217.0.2:42182->10.217.0.188:8775: read: connection reset by peer" Nov 29 07:15:08 crc kubenswrapper[4943]: I1129 07:15:08.837262 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.142505 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.237837 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-combined-ca-bundle\") pod \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.237924 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-nova-metadata-tls-certs\") pod \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.237984 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-config-data\") pod \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.238109 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d302191c-c05e-41a3-b6f0-e8e1e8358a94-logs\") pod \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.238166 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgbz5\" (UniqueName: \"kubernetes.io/projected/d302191c-c05e-41a3-b6f0-e8e1e8358a94-kube-api-access-vgbz5\") pod \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\" (UID: \"d302191c-c05e-41a3-b6f0-e8e1e8358a94\") " Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.238630 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d302191c-c05e-41a3-b6f0-e8e1e8358a94-logs" (OuterVolumeSpecName: "logs") pod "d302191c-c05e-41a3-b6f0-e8e1e8358a94" (UID: "d302191c-c05e-41a3-b6f0-e8e1e8358a94"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.244938 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d302191c-c05e-41a3-b6f0-e8e1e8358a94-kube-api-access-vgbz5" (OuterVolumeSpecName: "kube-api-access-vgbz5") pod "d302191c-c05e-41a3-b6f0-e8e1e8358a94" (UID: "d302191c-c05e-41a3-b6f0-e8e1e8358a94"). InnerVolumeSpecName "kube-api-access-vgbz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.263742 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d302191c-c05e-41a3-b6f0-e8e1e8358a94" (UID: "d302191c-c05e-41a3-b6f0-e8e1e8358a94"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.274743 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-config-data" (OuterVolumeSpecName: "config-data") pod "d302191c-c05e-41a3-b6f0-e8e1e8358a94" (UID: "d302191c-c05e-41a3-b6f0-e8e1e8358a94"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.288773 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "d302191c-c05e-41a3-b6f0-e8e1e8358a94" (UID: "d302191c-c05e-41a3-b6f0-e8e1e8358a94"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.293902 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.340389 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.340426 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d302191c-c05e-41a3-b6f0-e8e1e8358a94-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.340441 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgbz5\" (UniqueName: \"kubernetes.io/projected/d302191c-c05e-41a3-b6f0-e8e1e8358a94-kube-api-access-vgbz5\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.340451 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.340460 4943 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d302191c-c05e-41a3-b6f0-e8e1e8358a94-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.346861 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="018a61fe-e314-4c8f-b99b-df417631b935" path="/var/lib/kubelet/pods/018a61fe-e314-4c8f-b99b-df417631b935/volumes" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.453809 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c35e524d-9a33-483b-a6d8-98a7ace4b632","Type":"ContainerStarted","Data":"924601e6b6493e29b661e247900199b4ebd0b202d41f874dc2077fcc1e2ffaf6"} Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.456002 4943 generic.go:334] "Generic (PLEG): container finished" podID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerID="0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9" exitCode=0 Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.456049 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d302191c-c05e-41a3-b6f0-e8e1e8358a94","Type":"ContainerDied","Data":"0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9"} Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.456096 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.456122 4943 scope.go:117] "RemoveContainer" containerID="0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.456109 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d302191c-c05e-41a3-b6f0-e8e1e8358a94","Type":"ContainerDied","Data":"50450c4afdd72dbdf8aaeab27c162cde21cf02d4feee44cef16413b22129f001"} Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.481686 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.489192 4943 scope.go:117] "RemoveContainer" containerID="48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.496747 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.507414 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:15:09 crc kubenswrapper[4943]: E1129 07:15:09.507982 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-log" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.508004 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-log" Nov 29 07:15:09 crc kubenswrapper[4943]: E1129 07:15:09.508046 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-metadata" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.508055 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-metadata" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.508242 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-metadata" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.508278 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" containerName="nova-metadata-log" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.509637 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.512877 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.513094 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.513889 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.517755 4943 scope.go:117] "RemoveContainer" containerID="0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9" Nov 29 07:15:09 crc kubenswrapper[4943]: E1129 07:15:09.519017 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9\": container with ID starting with 0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9 not found: ID does not exist" containerID="0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.519058 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9"} err="failed to get container status \"0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9\": rpc error: code = NotFound desc = could not find container \"0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9\": container with ID starting with 0fd924a254a9d862df48c4cdd81cddf143483daa726c1f04fc5c84a1f1114ce9 not found: ID does not exist" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.519084 4943 scope.go:117] "RemoveContainer" containerID="48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f" Nov 29 07:15:09 crc kubenswrapper[4943]: E1129 07:15:09.532748 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f\": container with ID starting with 48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f not found: ID does not exist" containerID="48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.532805 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f"} err="failed to get container status \"48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f\": rpc error: code = NotFound desc = could not find container \"48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f\": container with ID starting with 48ecc8c840310f64003dd9c1879038fa94ee87fdae15bdd3801cf237b2e2531f not found: ID does not exist" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.543681 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-logs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.543749 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.543772 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9gcs\" (UniqueName: \"kubernetes.io/projected/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-kube-api-access-g9gcs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.543797 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.543861 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-config-data\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.645362 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-logs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.645460 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.645505 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9gcs\" (UniqueName: \"kubernetes.io/projected/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-kube-api-access-g9gcs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.645549 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.645650 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-config-data\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.645892 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-logs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0" Nov 29 
Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.649059 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0"
Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.649535 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0"
Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.649686 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-config-data\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0"
Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.662674 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9gcs\" (UniqueName: \"kubernetes.io/projected/6a8c1a66-0365-4e4a-8c6c-6721a50e7468-kube-api-access-g9gcs\") pod \"nova-metadata-0\" (UID: \"6a8c1a66-0365-4e4a-8c6c-6721a50e7468\") " pod="openstack/nova-metadata-0"
Nov 29 07:15:09 crc kubenswrapper[4943]: I1129 07:15:09.833718 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 29 07:15:10 crc kubenswrapper[4943]: I1129 07:15:10.296831 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 29 07:15:10 crc kubenswrapper[4943]: I1129 07:15:10.467405 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c35e524d-9a33-483b-a6d8-98a7ace4b632","Type":"ContainerStarted","Data":"d66e13781d4fb633c1f8ca1cbe78966903b1ba7a30188c62d78d6411d2f09a1f"}
Nov 29 07:15:10 crc kubenswrapper[4943]: I1129 07:15:10.470737 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6a8c1a66-0365-4e4a-8c6c-6721a50e7468","Type":"ContainerStarted","Data":"93ad896531a695ae530df77aed8dfbf0308b72558c9137f3dab15fdde26ab132"}
Nov 29 07:15:10 crc kubenswrapper[4943]: I1129 07:15:10.489002 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.488985922 podStartE2EDuration="2.488985922s" podCreationTimestamp="2025-11-29 07:15:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:15:10.481542399 +0000 UTC m=+2485.411631152" watchObservedRunningTime="2025-11-29 07:15:10.488985922 +0000 UTC m=+2485.419074675"
Nov 29 07:15:11 crc kubenswrapper[4943]: I1129 07:15:11.342483 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d302191c-c05e-41a3-b6f0-e8e1e8358a94" path="/var/lib/kubelet/pods/d302191c-c05e-41a3-b6f0-e8e1e8358a94/volumes"
Nov 29 07:15:11 crc kubenswrapper[4943]: I1129 07:15:11.481521 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6a8c1a66-0365-4e4a-8c6c-6721a50e7468","Type":"ContainerStarted","Data":"31e933f90d4bcf7536927b12e6babc234c822008f7646dc1b72c46d91be8a53e"}
Nov 29 07:15:11 crc kubenswrapper[4943]: I1129 07:15:11.481582 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6a8c1a66-0365-4e4a-8c6c-6721a50e7468","Type":"ContainerStarted","Data":"201b38752e7caa2402ffa4ebf98ed75ff5ad4c3b83a71a73145681342a8039f8"}
Nov 29 07:15:11 crc kubenswrapper[4943]: I1129 07:15:11.500870 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.5008503859999998 podStartE2EDuration="2.500850386s" podCreationTimestamp="2025-11-29 07:15:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:15:11.498773224 +0000 UTC m=+2486.428861987" watchObservedRunningTime="2025-11-29 07:15:11.500850386 +0000 UTC m=+2486.430939159"
Nov 29 07:15:13 crc kubenswrapper[4943]: I1129 07:15:13.837587 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 29 07:15:14 crc kubenswrapper[4943]: I1129 07:15:14.329047 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"
Nov 29 07:15:14 crc kubenswrapper[4943]: E1129 07:15:14.329417 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:15:14 crc kubenswrapper[4943]: I1129 07:15:14.834856 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 29 07:15:14 crc kubenswrapper[4943]: I1129 07:15:14.834907 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 29 07:15:16 crc kubenswrapper[4943]: I1129 07:15:16.789324 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 29 07:15:16 crc kubenswrapper[4943]: I1129 07:15:16.789647 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 29 07:15:17 crc kubenswrapper[4943]: I1129 07:15:17.804135 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7a73d989-5ec4-4b31-a47b-7b9bcda756cf" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 29 07:15:17 crc kubenswrapper[4943]: I1129 07:15:17.804779 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7a73d989-5ec4-4b31-a47b-7b9bcda756cf" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 29 07:15:18 crc kubenswrapper[4943]: I1129 07:15:18.838463 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 29 07:15:18 crc kubenswrapper[4943]: I1129 07:15:18.870191 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 29 07:15:19 crc kubenswrapper[4943]: I1129 07:15:19.569512 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
kubenswrapper[4943]: I1129 07:15:19.834190 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 29 07:15:19 crc kubenswrapper[4943]: I1129 07:15:19.834241 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 29 07:15:20 crc kubenswrapper[4943]: I1129 07:15:20.848764 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6a8c1a66-0365-4e4a-8c6c-6721a50e7468" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:15:20 crc kubenswrapper[4943]: I1129 07:15:20.848786 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6a8c1a66-0365-4e4a-8c6c-6721a50e7468" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 07:15:24 crc kubenswrapper[4943]: I1129 07:15:24.951400 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 29 07:15:26 crc kubenswrapper[4943]: I1129 07:15:26.795798 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 29 07:15:26 crc kubenswrapper[4943]: I1129 07:15:26.796379 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 29 07:15:26 crc kubenswrapper[4943]: I1129 07:15:26.800109 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 29 07:15:26 crc kubenswrapper[4943]: I1129 07:15:26.803920 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 29 07:15:27 crc kubenswrapper[4943]: I1129 07:15:27.327827 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:15:27 crc kubenswrapper[4943]: E1129 07:15:27.328441 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:15:27 crc kubenswrapper[4943]: I1129 07:15:27.637484 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 29 07:15:27 crc kubenswrapper[4943]: I1129 07:15:27.776027 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 29 07:15:29 crc kubenswrapper[4943]: I1129 07:15:29.842633 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 29 07:15:29 crc kubenswrapper[4943]: I1129 07:15:29.846053 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 29 07:15:29 crc kubenswrapper[4943]: I1129 07:15:29.848586 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 29 07:15:30 crc kubenswrapper[4943]: I1129 07:15:30.664284 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-metadata-0" Nov 29 07:15:39 crc kubenswrapper[4943]: I1129 07:15:39.635828 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 07:15:40 crc kubenswrapper[4943]: I1129 07:15:40.476403 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 07:15:41 crc kubenswrapper[4943]: I1129 07:15:41.327598 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:15:41 crc kubenswrapper[4943]: E1129 07:15:41.328229 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:15:43 crc kubenswrapper[4943]: I1129 07:15:43.599437 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerName="rabbitmq" containerID="cri-o://1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa" gracePeriod=604797 Nov 29 07:15:44 crc kubenswrapper[4943]: I1129 07:15:44.420519 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerName="rabbitmq" containerID="cri-o://2088ab4e238ed6a338a2136a8502ad6d693eb3bb0a2120b43361a2075d614487" gracePeriod=604797 Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.373193 4943 scope.go:117] "RemoveContainer" containerID="540021f999e4c8d4aadddf42db9b9abca345e7a35b470e82498ca1339e2b2120" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.696461 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813459 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813586 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-config-data\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813615 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5783e23b-47b8-4bbe-99aa-29271dc74d51-pod-info\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813633 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-server-conf\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813652 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-plugins-conf\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813682 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5783e23b-47b8-4bbe-99aa-29271dc74d51-erlang-cookie-secret\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813700 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-confd\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813717 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-tls\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813806 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-erlang-cookie\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813847 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-plugins\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: 
\"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.813873 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clkpj\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-kube-api-access-clkpj\") pod \"5783e23b-47b8-4bbe-99aa-29271dc74d51\" (UID: \"5783e23b-47b8-4bbe-99aa-29271dc74d51\") " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.815029 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.818240 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.818441 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.826379 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-kube-api-access-clkpj" (OuterVolumeSpecName: "kube-api-access-clkpj") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "kube-api-access-clkpj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.826400 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.828865 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/5783e23b-47b8-4bbe-99aa-29271dc74d51-pod-info" (OuterVolumeSpecName: "pod-info") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.832096 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.846976 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5783e23b-47b8-4bbe-99aa-29271dc74d51-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.856155 4943 generic.go:334] "Generic (PLEG): container finished" podID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerID="1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa" exitCode=0 Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.856248 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5783e23b-47b8-4bbe-99aa-29271dc74d51","Type":"ContainerDied","Data":"1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa"} Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.856273 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5783e23b-47b8-4bbe-99aa-29271dc74d51","Type":"ContainerDied","Data":"aaab6ce2ab4f44b00bfef6837d6fcbff565283e4b01c6d9bce80aaa1b439d123"} Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.856289 4943 scope.go:117] "RemoveContainer" containerID="1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.856404 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.871337 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-config-data" (OuterVolumeSpecName: "config-data") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.873609 4943 generic.go:334] "Generic (PLEG): container finished" podID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerID="2088ab4e238ed6a338a2136a8502ad6d693eb3bb0a2120b43361a2075d614487" exitCode=0 Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.873658 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263","Type":"ContainerDied","Data":"2088ab4e238ed6a338a2136a8502ad6d693eb3bb0a2120b43361a2075d614487"} Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.917890 4943 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.918228 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clkpj\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-kube-api-access-clkpj\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.918252 4943 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.918263 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.918271 4943 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5783e23b-47b8-4bbe-99aa-29271dc74d51-pod-info\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.918279 4943 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.918288 4943 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5783e23b-47b8-4bbe-99aa-29271dc74d51-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.918298 4943 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.918306 4943 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.924267 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-server-conf" (OuterVolumeSpecName: "server-conf") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:15:50 crc kubenswrapper[4943]: I1129 07:15:50.962421 4943 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.051616 4943 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.051655 4943 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5783e23b-47b8-4bbe-99aa-29271dc74d51-server-conf\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.172740 4943 scope.go:117] "RemoveContainer" containerID="e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.197836 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "5783e23b-47b8-4bbe-99aa-29271dc74d51" (UID: "5783e23b-47b8-4bbe-99aa-29271dc74d51"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.242717 4943 scope.go:117] "RemoveContainer" containerID="1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa" Nov 29 07:15:51 crc kubenswrapper[4943]: E1129 07:15:51.243821 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa\": container with ID starting with 1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa not found: ID does not exist" containerID="1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.243856 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa"} err="failed to get container status \"1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa\": rpc error: code = NotFound desc = could not find container \"1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa\": container with ID starting with 1dbdfe74a41fb5d19fa8a99c284c3513b5f7d1290eff35f3405b230a4944a9fa not found: ID does not exist" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.243878 4943 scope.go:117] "RemoveContainer" containerID="e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1" Nov 29 07:15:51 crc kubenswrapper[4943]: E1129 07:15:51.244165 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1\": container with ID starting with e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1 not found: ID does not exist" containerID="e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.244181 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1"} err="failed to get container 
status \"e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1\": rpc error: code = NotFound desc = could not find container \"e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1\": container with ID starting with e2fd5ec7e47b206df43ea2ecb3985aa4d9d8327318d707beab14988ee1b5aab1 not found: ID does not exist" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.257750 4943 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5783e23b-47b8-4bbe-99aa-29271dc74d51-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.311691 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.459841 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-confd\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.459874 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-config-data\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.459957 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-pod-info\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.460013 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.460041 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-plugins\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.460095 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-server-conf\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.460122 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-erlang-cookie-secret\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.460166 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-tls\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: 
\"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.460204 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-erlang-cookie\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.460236 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7x9v2\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-kube-api-access-7x9v2\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.460264 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-plugins-conf\") pod \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\" (UID: \"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263\") " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.461881 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.462285 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.462598 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.465180 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-pod-info" (OuterVolumeSpecName: "pod-info") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.465387 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "persistence") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "local-storage06-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.465747 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.465869 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.465911 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-kube-api-access-7x9v2" (OuterVolumeSpecName: "kube-api-access-7x9v2") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "kube-api-access-7x9v2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.484377 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.490537 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-config-data" (OuterVolumeSpecName: "config-data") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.492135 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.520402 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 07:15:51 crc kubenswrapper[4943]: E1129 07:15:51.520903 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerName="setup-container" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.520921 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerName="setup-container" Nov 29 07:15:51 crc kubenswrapper[4943]: E1129 07:15:51.520960 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerName="setup-container" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.520969 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerName="setup-container" Nov 29 07:15:51 crc kubenswrapper[4943]: E1129 07:15:51.520984 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerName="rabbitmq" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.520994 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerName="rabbitmq" Nov 29 07:15:51 crc kubenswrapper[4943]: E1129 07:15:51.521015 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerName="rabbitmq" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.521022 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerName="rabbitmq" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.521239 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" containerName="rabbitmq" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.521261 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" containerName="rabbitmq" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.522505 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.523852 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.525466 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.525745 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.525845 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.525952 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-9r7k8" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.526086 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.526193 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.531747 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562244 4943 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562307 4943 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562327 4943 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562366 4943 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562379 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7x9v2\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-kube-api-access-7x9v2\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562390 4943 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562402 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562413 4943 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-pod-info\") on node \"crc\" DevicePath \"\"" 
Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.562469 4943 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.567752 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-server-conf" (OuterVolumeSpecName: "server-conf") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.583311 4943 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.612083 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" (UID: "18c9e0d2-f13e-4af5-9f57-22f3f4c6b263"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.664376 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b2373f83-f75a-4f85-a8dd-133f36458591-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.664711 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-config-data\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.664748 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b2373f83-f75a-4f85-a8dd-133f36458591-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.664765 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6ghg\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-kube-api-access-h6ghg\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.664808 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.664990 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.665085 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.665145 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.665232 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.665336 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.665523 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.665714 4943 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-server-conf\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.665741 4943 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.665755 4943 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.767692 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.767746 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: 
\"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.767815 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.767854 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.767888 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.767915 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.767971 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.767986 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b2373f83-f75a-4f85-a8dd-133f36458591-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.768006 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-config-data\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.768034 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b2373f83-f75a-4f85-a8dd-133f36458591-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.768050 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6ghg\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-kube-api-access-h6ghg\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.768400 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.768619 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.768656 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.769582 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-config-data\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.769688 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.770086 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b2373f83-f75a-4f85-a8dd-133f36458591-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.771881 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.773096 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.775249 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b2373f83-f75a-4f85-a8dd-133f36458591-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.775370 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b2373f83-f75a-4f85-a8dd-133f36458591-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0" Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.784440 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-h6ghg\" (UniqueName: \"kubernetes.io/projected/b2373f83-f75a-4f85-a8dd-133f36458591-kube-api-access-h6ghg\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0"
Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.810631 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"b2373f83-f75a-4f85-a8dd-133f36458591\") " pod="openstack/rabbitmq-server-0"
Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.842940 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.889166 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18c9e0d2-f13e-4af5-9f57-22f3f4c6b263","Type":"ContainerDied","Data":"5b7c7b01abab4561ef0567d9081d8a6b872b084ed50aa550222e49356fa4ff59"}
Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.889209 4943 scope.go:117] "RemoveContainer" containerID="2088ab4e238ed6a338a2136a8502ad6d693eb3bb0a2120b43361a2075d614487"
Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.889295 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:51 crc kubenswrapper[4943]: I1129 07:15:51.983344 4943 scope.go:117] "RemoveContainer" containerID="ddd5cf83322bdfcf75f871299db3066abf8c67fc794c434dbee1ed9734efc31a"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.026614 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.037001 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.047316 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.051344 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.054408 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.054777 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.054854 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.054941 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.054992 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.055239 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.055312 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-xnlbn"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.055241 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.128140 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182014 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182090 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182118 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182140 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182169 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ba80f24b-269a-42ff-b97e-94623499b030-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182190 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182207 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182226 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68qn4\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-kube-api-access-68qn4\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182256 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ba80f24b-269a-42ff-b97e-94623499b030-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182277 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.182297 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284468 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284553 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284617 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284644 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284686 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ba80f24b-269a-42ff-b97e-94623499b030-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284717 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284742 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284768 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68qn4\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-kube-api-access-68qn4\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284802 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ba80f24b-269a-42ff-b97e-94623499b030-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284827 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.284854 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.285202 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.285671 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.285734 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.286666 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.287136 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.288356 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ba80f24b-269a-42ff-b97e-94623499b030-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.289437 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ba80f24b-269a-42ff-b97e-94623499b030-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.290334 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.290972 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ba80f24b-269a-42ff-b97e-94623499b030-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.292088 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.304432 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68qn4\" (UniqueName: \"kubernetes.io/projected/ba80f24b-269a-42ff-b97e-94623499b030-kube-api-access-68qn4\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.315287 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ba80f24b-269a-42ff-b97e-94623499b030\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.386097 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.820597 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.901529 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ba80f24b-269a-42ff-b97e-94623499b030","Type":"ContainerStarted","Data":"9a354281facc44915ee9186f60ca4253dfd2d335fd3178d2066ec21ffc3751d2"}
Nov 29 07:15:52 crc kubenswrapper[4943]: I1129 07:15:52.902872 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b2373f83-f75a-4f85-a8dd-133f36458591","Type":"ContainerStarted","Data":"d374f2b4b6fda514fca87ec6e861660b4f6240c18852d7128cb1c4a992c6c91f"}
Nov 29 07:15:53 crc kubenswrapper[4943]: I1129 07:15:53.338930 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18c9e0d2-f13e-4af5-9f57-22f3f4c6b263" path="/var/lib/kubelet/pods/18c9e0d2-f13e-4af5-9f57-22f3f4c6b263/volumes"
Nov 29 07:15:53 crc kubenswrapper[4943]: I1129 07:15:53.340314 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5783e23b-47b8-4bbe-99aa-29271dc74d51" path="/var/lib/kubelet/pods/5783e23b-47b8-4bbe-99aa-29271dc74d51/volumes"
Nov 29 07:15:53 crc kubenswrapper[4943]: I1129 07:15:53.913486 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b2373f83-f75a-4f85-a8dd-133f36458591","Type":"ContainerStarted","Data":"94ae004a8fa8c358501a62ecd041b42ccb2c77cc5076edc2d30b4f6f7f13aea5"}
Nov 29 07:15:54 crc kubenswrapper[4943]: I1129 07:15:54.923211 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ba80f24b-269a-42ff-b97e-94623499b030","Type":"ContainerStarted","Data":"f2674c69e270a6e70906b951eaf45b841fba6da6abc7530603d6392c1efcda5a"}
Nov 29 07:15:55 crc kubenswrapper[4943]: I1129 07:15:55.332758 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"
Nov 29 07:15:55 crc kubenswrapper[4943]: E1129 07:15:55.333014 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.820116 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-mdwdt"]
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.822971 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.826004 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.836267 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-mdwdt"]
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.868221 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-config\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.868605 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnlbj\" (UniqueName: \"kubernetes.io/projected/09af807a-c4a1-4589-8fdf-57401b450bd4-kube-api-access-xnlbj\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.868659 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.868914 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.868995 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.869042 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.970639 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-config\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.970683 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnlbj\" (UniqueName: \"kubernetes.io/projected/09af807a-c4a1-4589-8fdf-57401b450bd4-kube-api-access-xnlbj\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.970703 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.970729 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.970770 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.970806 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.971582 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-config\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.971711 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.971828 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.971874 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.972049 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:56 crc kubenswrapper[4943]: I1129 07:15:56.990492 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnlbj\" (UniqueName: \"kubernetes.io/projected/09af807a-c4a1-4589-8fdf-57401b450bd4-kube-api-access-xnlbj\") pod \"dnsmasq-dns-6447ccbd8f-mdwdt\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") " pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.014350 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-mdwdt"]
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.015186 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.067122 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fr7bp"]
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.069921 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.086632 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fr7bp"]
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.174541 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.175000 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.175058 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.175163 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-config\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.175252 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lpz9\" (UniqueName: \"kubernetes.io/projected/cf023768-5d6d-4f46-bf87-b675b8a32480-kube-api-access-2lpz9\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.175330 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.277353 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.277490 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-config\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.277592 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lpz9\" (UniqueName: \"kubernetes.io/projected/cf023768-5d6d-4f46-bf87-b675b8a32480-kube-api-access-2lpz9\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.277646 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.277904 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.277938 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.278886 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.278886 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.279172 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.279551 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-config\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.279649 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.297459 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lpz9\" (UniqueName: \"kubernetes.io/projected/cf023768-5d6d-4f46-bf87-b675b8a32480-kube-api-access-2lpz9\") pod \"dnsmasq-dns-864d5fc68c-fr7bp\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") " pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.468139 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:15:57 crc kubenswrapper[4943]: W1129 07:15:57.587235 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09af807a_c4a1_4589_8fdf_57401b450bd4.slice/crio-81e6ca7b5dc1f8178eaf8ff857c18d274093e8bc584c998b107fbb387f179b23 WatchSource:0}: Error finding container 81e6ca7b5dc1f8178eaf8ff857c18d274093e8bc584c998b107fbb387f179b23: Status 404 returned error can't find the container with id 81e6ca7b5dc1f8178eaf8ff857c18d274093e8bc584c998b107fbb387f179b23
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.595287 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-mdwdt"]
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.967908 4943 generic.go:334] "Generic (PLEG): container finished" podID="09af807a-c4a1-4589-8fdf-57401b450bd4" containerID="35711d0d8499826cc2199d564f5ea8128a64daa243631f75bd43813601db822f" exitCode=0
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.968254 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt" event={"ID":"09af807a-c4a1-4589-8fdf-57401b450bd4","Type":"ContainerDied","Data":"35711d0d8499826cc2199d564f5ea8128a64daa243631f75bd43813601db822f"}
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.968288 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt" event={"ID":"09af807a-c4a1-4589-8fdf-57401b450bd4","Type":"ContainerStarted","Data":"81e6ca7b5dc1f8178eaf8ff857c18d274093e8bc584c998b107fbb387f179b23"}
Nov 29 07:15:57 crc kubenswrapper[4943]: I1129 07:15:57.974179 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fr7bp"]
Nov 29 07:15:57 crc kubenswrapper[4943]: W1129 07:15:57.976353 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf023768_5d6d_4f46_bf87_b675b8a32480.slice/crio-b758215beaa1c4d2f2f9202646e7131d4c3232aa0787cafb8416efdcf770ad22 WatchSource:0}: Error finding container b758215beaa1c4d2f2f9202646e7131d4c3232aa0787cafb8416efdcf770ad22: Status 404 returned error can't find the container with id b758215beaa1c4d2f2f9202646e7131d4c3232aa0787cafb8416efdcf770ad22
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.412816 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.506513 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-config\") pod \"09af807a-c4a1-4589-8fdf-57401b450bd4\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") "
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.506897 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-dns-svc\") pod \"09af807a-c4a1-4589-8fdf-57401b450bd4\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") "
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.506932 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-nb\") pod \"09af807a-c4a1-4589-8fdf-57401b450bd4\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") "
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.507024 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnlbj\" (UniqueName: \"kubernetes.io/projected/09af807a-c4a1-4589-8fdf-57401b450bd4-kube-api-access-xnlbj\") pod \"09af807a-c4a1-4589-8fdf-57401b450bd4\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") "
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.507315 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-sb\") pod \"09af807a-c4a1-4589-8fdf-57401b450bd4\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") "
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.507356 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-openstack-edpm-ipam\") pod \"09af807a-c4a1-4589-8fdf-57401b450bd4\" (UID: \"09af807a-c4a1-4589-8fdf-57401b450bd4\") "
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.511387 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09af807a-c4a1-4589-8fdf-57401b450bd4-kube-api-access-xnlbj" (OuterVolumeSpecName: "kube-api-access-xnlbj") pod "09af807a-c4a1-4589-8fdf-57401b450bd4" (UID: "09af807a-c4a1-4589-8fdf-57401b450bd4"). InnerVolumeSpecName "kube-api-access-xnlbj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.530864 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "09af807a-c4a1-4589-8fdf-57401b450bd4" (UID: "09af807a-c4a1-4589-8fdf-57401b450bd4"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.531101 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "09af807a-c4a1-4589-8fdf-57401b450bd4" (UID: "09af807a-c4a1-4589-8fdf-57401b450bd4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.531271 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-config" (OuterVolumeSpecName: "config") pod "09af807a-c4a1-4589-8fdf-57401b450bd4" (UID: "09af807a-c4a1-4589-8fdf-57401b450bd4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.532371 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "09af807a-c4a1-4589-8fdf-57401b450bd4" (UID: "09af807a-c4a1-4589-8fdf-57401b450bd4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.534469 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "09af807a-c4a1-4589-8fdf-57401b450bd4" (UID: "09af807a-c4a1-4589-8fdf-57401b450bd4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.609644 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-config\") on node \"crc\" DevicePath \"\""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.609676 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.609689 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.609704 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnlbj\" (UniqueName: \"kubernetes.io/projected/09af807a-c4a1-4589-8fdf-57401b450bd4-kube-api-access-xnlbj\") on node \"crc\" DevicePath \"\""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.609716 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.609727 4943 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09af807a-c4a1-4589-8fdf-57401b450bd4-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.979639 4943 generic.go:334] "Generic (PLEG): container finished" podID="cf023768-5d6d-4f46-bf87-b675b8a32480" containerID="46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7" exitCode=0
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.979728 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp" event={"ID":"cf023768-5d6d-4f46-bf87-b675b8a32480","Type":"ContainerDied","Data":"46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7"}
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.979772 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp" event={"ID":"cf023768-5d6d-4f46-bf87-b675b8a32480","Type":"ContainerStarted","Data":"b758215beaa1c4d2f2f9202646e7131d4c3232aa0787cafb8416efdcf770ad22"}
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.983031 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt" event={"ID":"09af807a-c4a1-4589-8fdf-57401b450bd4","Type":"ContainerDied","Data":"81e6ca7b5dc1f8178eaf8ff857c18d274093e8bc584c998b107fbb387f179b23"}
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.983078 4943 scope.go:117] "RemoveContainer" containerID="35711d0d8499826cc2199d564f5ea8128a64daa243631f75bd43813601db822f"
Nov 29 07:15:58 crc kubenswrapper[4943]: I1129 07:15:58.983185 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-mdwdt"
Nov 29 07:15:59 crc kubenswrapper[4943]: I1129 07:15:59.230098 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-mdwdt"]
Nov 29 07:15:59 crc kubenswrapper[4943]: I1129 07:15:59.237410 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-mdwdt"]
Nov 29 07:15:59 crc kubenswrapper[4943]: I1129 07:15:59.339401 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09af807a-c4a1-4589-8fdf-57401b450bd4" path="/var/lib/kubelet/pods/09af807a-c4a1-4589-8fdf-57401b450bd4/volumes"
Nov 29 07:15:59 crc kubenswrapper[4943]: I1129 07:15:59.992424 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp" event={"ID":"cf023768-5d6d-4f46-bf87-b675b8a32480","Type":"ContainerStarted","Data":"9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a"}
Nov 29 07:15:59 crc kubenswrapper[4943]: I1129 07:15:59.993283 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:16:00 crc kubenswrapper[4943]: I1129 07:16:00.013810 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp" podStartSLOduration=3.013788302 podStartE2EDuration="3.013788302s" podCreationTimestamp="2025-11-29 07:15:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:16:00.00806414 +0000 UTC m=+2534.938152923" watchObservedRunningTime="2025-11-29 07:16:00.013788302 +0000 UTC m=+2534.943877055"
Nov 29 07:16:07 crc kubenswrapper[4943]: I1129 07:16:07.469736 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:16:07 crc kubenswrapper[4943]: I1129 07:16:07.530731 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-vzvpz"]
Nov 29 07:16:07 crc kubenswrapper[4943]: I1129 07:16:07.530982 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" podUID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" containerName="dnsmasq-dns" containerID="cri-o://22c8c91a6c9d2f661d6e79990c109a6792384289ec91138cb365bb54195de1f4" gracePeriod=10
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.086893 4943 generic.go:334] "Generic (PLEG): container finished" podID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" containerID="22c8c91a6c9d2f661d6e79990c109a6792384289ec91138cb365bb54195de1f4" exitCode=0
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.087148 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" event={"ID":"61e0b52f-bff7-4aaa-a0e3-309165bc0513","Type":"ContainerDied","Data":"22c8c91a6c9d2f661d6e79990c109a6792384289ec91138cb365bb54195de1f4"}
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.087176 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz" event={"ID":"61e0b52f-bff7-4aaa-a0e3-309165bc0513","Type":"ContainerDied","Data":"ac974df5f3425681d436ad56e13e97c1f20e70c884196fc231a50b9d3ee443ba"}
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.087187 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac974df5f3425681d436ad56e13e97c1f20e70c884196fc231a50b9d3ee443ba"
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.118300 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz"
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.186413 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-config\") pod \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") "
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.186486 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s97s5\" (UniqueName: \"kubernetes.io/projected/61e0b52f-bff7-4aaa-a0e3-309165bc0513-kube-api-access-s97s5\") pod \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") "
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.186541 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-nb\") pod \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") "
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.186590 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-sb\") pod \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") "
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.186735 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-dns-svc\") pod \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\" (UID: \"61e0b52f-bff7-4aaa-a0e3-309165bc0513\") "
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.200813 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61e0b52f-bff7-4aaa-a0e3-309165bc0513-kube-api-access-s97s5" (OuterVolumeSpecName: "kube-api-access-s97s5") pod "61e0b52f-bff7-4aaa-a0e3-309165bc0513" (UID: "61e0b52f-bff7-4aaa-a0e3-309165bc0513"). InnerVolumeSpecName "kube-api-access-s97s5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.237266 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "61e0b52f-bff7-4aaa-a0e3-309165bc0513" (UID: "61e0b52f-bff7-4aaa-a0e3-309165bc0513"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.243001 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-config" (OuterVolumeSpecName: "config") pod "61e0b52f-bff7-4aaa-a0e3-309165bc0513" (UID: "61e0b52f-bff7-4aaa-a0e3-309165bc0513"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.243127 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "61e0b52f-bff7-4aaa-a0e3-309165bc0513" (UID: "61e0b52f-bff7-4aaa-a0e3-309165bc0513"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.258064 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "61e0b52f-bff7-4aaa-a0e3-309165bc0513" (UID: "61e0b52f-bff7-4aaa-a0e3-309165bc0513"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.288924 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s97s5\" (UniqueName: \"kubernetes.io/projected/61e0b52f-bff7-4aaa-a0e3-309165bc0513-kube-api-access-s97s5\") on node \"crc\" DevicePath \"\""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.288987 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.289000 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.289011 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 29 07:16:08 crc kubenswrapper[4943]: I1129 07:16:08.289023 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e0b52f-bff7-4aaa-a0e3-309165bc0513-config\") on node \"crc\" DevicePath \"\""
Nov 29 07:16:09 crc kubenswrapper[4943]: I1129 07:16:09.093486 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-vzvpz"
Nov 29 07:16:09 crc kubenswrapper[4943]: I1129 07:16:09.124961 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-vzvpz"]
Nov 29 07:16:09 crc kubenswrapper[4943]: I1129 07:16:09.134816 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-vzvpz"]
Nov 29 07:16:09 crc kubenswrapper[4943]: I1129 07:16:09.347032 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" path="/var/lib/kubelet/pods/61e0b52f-bff7-4aaa-a0e3-309165bc0513/volumes"
Nov 29 07:16:10 crc kubenswrapper[4943]: I1129 07:16:10.328017 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"
Nov 29 07:16:10 crc kubenswrapper[4943]: E1129 07:16:10.328518 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.200150 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"]
Nov 29 07:16:13 crc kubenswrapper[4943]: E1129 07:16:13.200831 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" containerName="dnsmasq-dns"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.200849 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" containerName="dnsmasq-dns"
Nov 29 07:16:13 crc kubenswrapper[4943]: E1129 07:16:13.200882 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" containerName="init"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.200893 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" containerName="init"
Nov 29 07:16:13 crc kubenswrapper[4943]: E1129 07:16:13.200916 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09af807a-c4a1-4589-8fdf-57401b450bd4" containerName="init"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.200924 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="09af807a-c4a1-4589-8fdf-57401b450bd4" containerName="init"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.201139 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="61e0b52f-bff7-4aaa-a0e3-309165bc0513" containerName="dnsmasq-dns"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.201162 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="09af807a-c4a1-4589-8fdf-57401b450bd4" containerName="init"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.201905 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.203979 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.204318 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.204544 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.205445 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.217816 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"]
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.278613 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.278674 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.278784 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.278825 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjtf6\" (UniqueName: \"kubernetes.io/projected/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-kube-api-access-sjtf6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.381058 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.381132 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjtf6\" (UniqueName: \"kubernetes.io/projected/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-kube-api-access-sjtf6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.381293 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.381372 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.390905 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.391585 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.396618 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.403309 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjtf6\" (UniqueName: \"kubernetes.io/projected/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-kube-api-access-sjtf6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:13 crc kubenswrapper[4943]: I1129 07:16:13.529262 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"
Nov 29 07:16:14 crc kubenswrapper[4943]: I1129 07:16:14.087855 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"]
Nov 29 07:16:14 crc kubenswrapper[4943]: W1129 07:16:14.089696 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e8445cc_bfe6_46ee_bd56_7a051fd994cd.slice/crio-f36f41260391bf1d44fc7cdc9b744b8299461379f34ad51f8824484f3407ec91 WatchSource:0}: Error finding container f36f41260391bf1d44fc7cdc9b744b8299461379f34ad51f8824484f3407ec91: Status 404 returned error can't find the container with id f36f41260391bf1d44fc7cdc9b744b8299461379f34ad51f8824484f3407ec91
Nov 29 07:16:14 crc kubenswrapper[4943]: I1129 07:16:14.133683 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj" event={"ID":"2e8445cc-bfe6-46ee-bd56-7a051fd994cd","Type":"ContainerStarted","Data":"f36f41260391bf1d44fc7cdc9b744b8299461379f34ad51f8824484f3407ec91"}
Nov 29 07:16:24 crc kubenswrapper[4943]: I1129 07:16:24.221179 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj" event={"ID":"2e8445cc-bfe6-46ee-bd56-7a051fd994cd","Type":"ContainerStarted","Data":"5f65695c3772c7d2863f6b60632c35b9ba30bcd39d975205bbd1e6c3f061c62c"}
Nov 29 07:16:24 crc kubenswrapper[4943]: I1129 07:16:24.240788 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj" podStartSLOduration=1.5527775990000001 podStartE2EDuration="11.240771971s" podCreationTimestamp="2025-11-29 07:16:13 +0000 UTC" firstStartedPulling="2025-11-29 07:16:14.099404215 +0000 UTC m=+2549.029492968" lastFinishedPulling="2025-11-29 07:16:23.787398587 +0000 UTC m=+2558.717487340" observedRunningTime="2025-11-29 07:16:24.235939262 +0000 UTC m=+2559.166028045" watchObservedRunningTime="2025-11-29 07:16:24.240771971 +0000 UTC m=+2559.170860724"
Nov 29 07:16:24 crc kubenswrapper[4943]: I1129 07:16:24.327806 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"
Nov 29 07:16:24 crc kubenswrapper[4943]: E1129 07:16:24.328117 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:16:26 crc kubenswrapper[4943]: I1129 07:16:26.237237 4943 generic.go:334] "Generic (PLEG): container finished" podID="b2373f83-f75a-4f85-a8dd-133f36458591" containerID="94ae004a8fa8c358501a62ecd041b42ccb2c77cc5076edc2d30b4f6f7f13aea5" exitCode=0
Nov 29 07:16:26 crc kubenswrapper[4943]: I1129 07:16:26.237329 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b2373f83-f75a-4f85-a8dd-133f36458591","Type":"ContainerDied","Data":"94ae004a8fa8c358501a62ecd041b42ccb2c77cc5076edc2d30b4f6f7f13aea5"}
Nov 29 07:16:27 crc kubenswrapper[4943]: I1129 07:16:27.246796 4943 generic.go:334] "Generic (PLEG): container finished" podID="ba80f24b-269a-42ff-b97e-94623499b030"
containerID="f2674c69e270a6e70906b951eaf45b841fba6da6abc7530603d6392c1efcda5a" exitCode=0 Nov 29 07:16:27 crc kubenswrapper[4943]: I1129 07:16:27.246914 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ba80f24b-269a-42ff-b97e-94623499b030","Type":"ContainerDied","Data":"f2674c69e270a6e70906b951eaf45b841fba6da6abc7530603d6392c1efcda5a"} Nov 29 07:16:28 crc kubenswrapper[4943]: I1129 07:16:28.256678 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ba80f24b-269a-42ff-b97e-94623499b030","Type":"ContainerStarted","Data":"8b59684298f026a6ee45bee2d9d6247cbfc31aa9e0b7b634c2e2c1738fc26d3b"} Nov 29 07:16:29 crc kubenswrapper[4943]: I1129 07:16:29.267185 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b2373f83-f75a-4f85-a8dd-133f36458591","Type":"ContainerStarted","Data":"361b29ae4ed51e057f26322a41689491d03bc96769e7e55faeffaf27105d2727"} Nov 29 07:16:29 crc kubenswrapper[4943]: I1129 07:16:29.267397 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:16:29 crc kubenswrapper[4943]: I1129 07:16:29.267679 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 29 07:16:29 crc kubenswrapper[4943]: I1129 07:16:29.299866 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.299845734 podStartE2EDuration="38.299845734s" podCreationTimestamp="2025-11-29 07:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:16:29.289462207 +0000 UTC m=+2564.219550980" watchObservedRunningTime="2025-11-29 07:16:29.299845734 +0000 UTC m=+2564.229934487" Nov 29 07:16:29 crc kubenswrapper[4943]: I1129 07:16:29.315593 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.315548261 podStartE2EDuration="38.315548261s" podCreationTimestamp="2025-11-29 07:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:16:29.31026749 +0000 UTC m=+2564.240356263" watchObservedRunningTime="2025-11-29 07:16:29.315548261 +0000 UTC m=+2564.245637014" Nov 29 07:16:37 crc kubenswrapper[4943]: I1129 07:16:37.328497 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:16:37 crc kubenswrapper[4943]: E1129 07:16:37.329359 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:16:41 crc kubenswrapper[4943]: I1129 07:16:41.365549 4943 generic.go:334] "Generic (PLEG): container finished" podID="2e8445cc-bfe6-46ee-bd56-7a051fd994cd" containerID="5f65695c3772c7d2863f6b60632c35b9ba30bcd39d975205bbd1e6c3f061c62c" exitCode=0 Nov 29 07:16:41 crc kubenswrapper[4943]: I1129 07:16:41.365696 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj" event={"ID":"2e8445cc-bfe6-46ee-bd56-7a051fd994cd","Type":"ContainerDied","Data":"5f65695c3772c7d2863f6b60632c35b9ba30bcd39d975205bbd1e6c3f061c62c"} Nov 29 07:16:41 crc kubenswrapper[4943]: I1129 07:16:41.847788 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.388800 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.821610 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.879596 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-inventory\") pod \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.879706 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-repo-setup-combined-ca-bundle\") pod \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.879799 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjtf6\" (UniqueName: \"kubernetes.io/projected/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-kube-api-access-sjtf6\") pod \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.879846 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-ssh-key\") pod \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\" (UID: \"2e8445cc-bfe6-46ee-bd56-7a051fd994cd\") " Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.887089 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-kube-api-access-sjtf6" (OuterVolumeSpecName: "kube-api-access-sjtf6") pod "2e8445cc-bfe6-46ee-bd56-7a051fd994cd" (UID: "2e8445cc-bfe6-46ee-bd56-7a051fd994cd"). InnerVolumeSpecName "kube-api-access-sjtf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.887189 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "2e8445cc-bfe6-46ee-bd56-7a051fd994cd" (UID: "2e8445cc-bfe6-46ee-bd56-7a051fd994cd"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.907162 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-inventory" (OuterVolumeSpecName: "inventory") pod "2e8445cc-bfe6-46ee-bd56-7a051fd994cd" (UID: "2e8445cc-bfe6-46ee-bd56-7a051fd994cd"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.908382 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2e8445cc-bfe6-46ee-bd56-7a051fd994cd" (UID: "2e8445cc-bfe6-46ee-bd56-7a051fd994cd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.982210 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.982244 4943 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.982256 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjtf6\" (UniqueName: \"kubernetes.io/projected/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-kube-api-access-sjtf6\") on node \"crc\" DevicePath \"\"" Nov 29 07:16:42 crc kubenswrapper[4943]: I1129 07:16:42.982265 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e8445cc-bfe6-46ee-bd56-7a051fd994cd-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.381394 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj" event={"ID":"2e8445cc-bfe6-46ee-bd56-7a051fd994cd","Type":"ContainerDied","Data":"f36f41260391bf1d44fc7cdc9b744b8299461379f34ad51f8824484f3407ec91"} Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.381440 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f36f41260391bf1d44fc7cdc9b744b8299461379f34ad51f8824484f3407ec91" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.381494 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.479667 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j"] Nov 29 07:16:43 crc kubenswrapper[4943]: E1129 07:16:43.480072 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e8445cc-bfe6-46ee-bd56-7a051fd994cd" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.480088 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e8445cc-bfe6-46ee-bd56-7a051fd994cd" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.480299 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e8445cc-bfe6-46ee-bd56-7a051fd994cd" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.481067 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.483158 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.483584 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.484238 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.489665 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j"] Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.490937 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.592341 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.592603 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czl9f\" (UniqueName: \"kubernetes.io/projected/a93b5d7a-0946-4c3c-990d-e8988279b13a-kube-api-access-czl9f\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.592668 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.593116 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.694834 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.694938 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czl9f\" (UniqueName: \"kubernetes.io/projected/a93b5d7a-0946-4c3c-990d-e8988279b13a-kube-api-access-czl9f\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.694965 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.695076 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.699613 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.707115 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.707232 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.714052 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czl9f\" (UniqueName: \"kubernetes.io/projected/a93b5d7a-0946-4c3c-990d-e8988279b13a-kube-api-access-czl9f\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:43 crc kubenswrapper[4943]: I1129 07:16:43.801424 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:16:44 crc kubenswrapper[4943]: I1129 07:16:44.184065 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j"] Nov 29 07:16:44 crc kubenswrapper[4943]: I1129 07:16:44.389905 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" event={"ID":"a93b5d7a-0946-4c3c-990d-e8988279b13a","Type":"ContainerStarted","Data":"58ecd56ed4a7238a9c7328e6ad926ca5552a553ba21b9c0561077ec6f290310d"} Nov 29 07:16:46 crc kubenswrapper[4943]: I1129 07:16:46.416170 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" event={"ID":"a93b5d7a-0946-4c3c-990d-e8988279b13a","Type":"ContainerStarted","Data":"c4236c3b987e967329a28a6256282ba8992591609610af4bcd34c4e1a40deeae"} Nov 29 07:16:46 crc kubenswrapper[4943]: I1129 07:16:46.438772 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" podStartSLOduration=2.550646924 podStartE2EDuration="3.438754925s" podCreationTimestamp="2025-11-29 07:16:43 +0000 UTC" firstStartedPulling="2025-11-29 07:16:44.191182405 +0000 UTC m=+2579.121271158" lastFinishedPulling="2025-11-29 07:16:45.079290406 +0000 UTC m=+2580.009379159" observedRunningTime="2025-11-29 07:16:46.431378403 +0000 UTC m=+2581.361467156" watchObservedRunningTime="2025-11-29 07:16:46.438754925 +0000 UTC m=+2581.368843678" Nov 29 07:16:48 crc kubenswrapper[4943]: I1129 07:16:48.328024 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:16:48 crc kubenswrapper[4943]: E1129 07:16:48.328627 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:16:59 crc kubenswrapper[4943]: I1129 07:16:59.327956 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:16:59 crc kubenswrapper[4943]: E1129 07:16:59.328743 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:17:12 crc kubenswrapper[4943]: I1129 07:17:12.327109 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:17:12 crc kubenswrapper[4943]: E1129 07:17:12.328135 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:17:27 crc kubenswrapper[4943]: I1129 07:17:27.328419 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:17:27 crc kubenswrapper[4943]: E1129 07:17:27.329422 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:17:40 crc kubenswrapper[4943]: I1129 07:17:40.328480 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:17:40 crc kubenswrapper[4943]: E1129 07:17:40.329696 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:17:55 crc kubenswrapper[4943]: I1129 07:17:55.338274 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:17:55 crc kubenswrapper[4943]: E1129 07:17:55.339129 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:18:06 crc kubenswrapper[4943]: I1129 07:18:06.328222 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:18:06 crc kubenswrapper[4943]: E1129 07:18:06.328893 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:18:09 crc kubenswrapper[4943]: I1129 07:18:09.038529 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-hr2kc"] Nov 29 07:18:09 crc kubenswrapper[4943]: I1129 07:18:09.053247 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-hr2kc"] Nov 29 07:18:09 crc kubenswrapper[4943]: I1129 07:18:09.339904 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4585ad40-a69e-4a85-9f0f-e051f80ec2a2" path="/var/lib/kubelet/pods/4585ad40-a69e-4a85-9f0f-e051f80ec2a2/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.083850 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-42ffw"] Nov 29 07:18:13 crc 
kubenswrapper[4943]: I1129 07:18:13.096060 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-3fcd-account-create-update-fp4gv"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.106201 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d123-account-create-update-zdc47"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.113901 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-9232-account-create-update-lrvzq"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.122520 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-b55h5"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.130847 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-42ffw"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.138752 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-b55h5"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.145242 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-d123-account-create-update-zdc47"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.151634 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-9232-account-create-update-lrvzq"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.160887 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-3fcd-account-create-update-fp4gv"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.168633 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-qfd9g"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.176898 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-qfd9g"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.187425 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-2086-account-create-update-6gl95"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.196180 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-zxcn5"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.203854 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-zxcn5"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.212336 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-39a1-account-create-update-bjzkr"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.221662 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-2086-account-create-update-6gl95"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.231107 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-39a1-account-create-update-bjzkr"] Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.339670 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03ca0239-e8f6-4461-a843-27423c41c4b3" path="/var/lib/kubelet/pods/03ca0239-e8f6-4461-a843-27423c41c4b3/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.340591 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b1a5eac-fd81-4595-a46b-dc0d79e09def" path="/var/lib/kubelet/pods/1b1a5eac-fd81-4595-a46b-dc0d79e09def/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.341204 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43b36ea0-76e2-4c88-acb5-1009ac0c4383" 
path="/var/lib/kubelet/pods/43b36ea0-76e2-4c88-acb5-1009ac0c4383/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.341763 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7" path="/var/lib/kubelet/pods/7cb5a52a-491f-4fe1-8a5d-3a6c9cf461f7/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.342737 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ea41621-2d8a-4f99-ad06-da34cadcca6a" path="/var/lib/kubelet/pods/8ea41621-2d8a-4f99-ad06-da34cadcca6a/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.343342 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9380c6cb-987b-4813-b0fd-0fc45c0ddaaa" path="/var/lib/kubelet/pods/9380c6cb-987b-4813-b0fd-0fc45c0ddaaa/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.343888 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a178d2d1-8c2f-4da5-bad1-15edb77a9508" path="/var/lib/kubelet/pods/a178d2d1-8c2f-4da5-bad1-15edb77a9508/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.344900 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2698992-ed9a-4c83-9294-71ba289f83f9" path="/var/lib/kubelet/pods/b2698992-ed9a-4c83-9294-71ba289f83f9/volumes" Nov 29 07:18:13 crc kubenswrapper[4943]: I1129 07:18:13.345431 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2" path="/var/lib/kubelet/pods/def6cfce-1ee4-4dd1-b5ab-c4fd8ff370c2/volumes" Nov 29 07:18:19 crc kubenswrapper[4943]: I1129 07:18:19.328535 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:18:19 crc kubenswrapper[4943]: E1129 07:18:19.329284 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:18:23 crc kubenswrapper[4943]: I1129 07:18:23.039643 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-qhcwg"] Nov 29 07:18:23 crc kubenswrapper[4943]: I1129 07:18:23.051684 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-daf9-account-create-update-qq5jc"] Nov 29 07:18:23 crc kubenswrapper[4943]: I1129 07:18:23.061796 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-qhcwg"] Nov 29 07:18:23 crc kubenswrapper[4943]: I1129 07:18:23.072235 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-daf9-account-create-update-qq5jc"] Nov 29 07:18:23 crc kubenswrapper[4943]: I1129 07:18:23.336688 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08053aab-1c5b-41a7-bd0f-a51e91e7bcbb" path="/var/lib/kubelet/pods/08053aab-1c5b-41a7-bd0f-a51e91e7bcbb/volumes" Nov 29 07:18:23 crc kubenswrapper[4943]: I1129 07:18:23.337293 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee3e2ff9-9a6e-4d2e-a487-6574ab627de4" path="/var/lib/kubelet/pods/ee3e2ff9-9a6e-4d2e-a487-6574ab627de4/volumes" Nov 29 07:18:29 crc kubenswrapper[4943]: I1129 07:18:29.050759 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/keystone-db-sync-skrww"] Nov 29 07:18:29 crc kubenswrapper[4943]: I1129 07:18:29.059062 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-skrww"] Nov 29 07:18:29 crc kubenswrapper[4943]: I1129 07:18:29.348055 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3462ac03-a13b-49cf-ab08-b8d7a089a7e5" path="/var/lib/kubelet/pods/3462ac03-a13b-49cf-ab08-b8d7a089a7e5/volumes" Nov 29 07:18:31 crc kubenswrapper[4943]: I1129 07:18:31.327100 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:18:31 crc kubenswrapper[4943]: E1129 07:18:31.327797 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:18:43 crc kubenswrapper[4943]: I1129 07:18:43.327611 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:18:43 crc kubenswrapper[4943]: E1129 07:18:43.328682 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.611531 4943 scope.go:117] "RemoveContainer" containerID="49eb44bbf07b71e42a3a8bf15c19af51acb0736fa53457792a2dec56644cbb16" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.642539 4943 scope.go:117] "RemoveContainer" containerID="1bd3ea063b789f68267abc2074a9c569cff4e391ba9eaf887e29a7c7d18a8f57" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.702476 4943 scope.go:117] "RemoveContainer" containerID="40069339da6faee502fef270fc26c5c39e4502fb012e1eb3e8ee912328070e78" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.747656 4943 scope.go:117] "RemoveContainer" containerID="4d311826fc9b2c68f96b713c3041595e7cd1f52688c1a5e7767499bef047c281" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.800513 4943 scope.go:117] "RemoveContainer" containerID="475ae878fac798e5a8e5c17195778d15dd9f86cbf30a2fcda7356582ffac0118" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.829484 4943 scope.go:117] "RemoveContainer" containerID="63769c0a9a18143ad328e5b983d7b53a2d6486270c45e1008e174b0d0e687f8f" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.890627 4943 scope.go:117] "RemoveContainer" containerID="812bddea28046febf505711572111b12eaeffd67e2db3f86911b6b9e716b58d8" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.911289 4943 scope.go:117] "RemoveContainer" containerID="3c24bdf867712015a1678899e4b340040c5d61d8afbc28b43f1989a0826ce83d" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.927669 4943 scope.go:117] "RemoveContainer" containerID="f0ad72ec7d3168ba523f0752c312e41a6e43ed4caa235fb078189bbbbf5d8766" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.947009 4943 scope.go:117] "RemoveContainer" 
containerID="e99a74850d328e67e8b77319697ca772ba91e0b146a63cd4bad836ddff781b39" Nov 29 07:18:50 crc kubenswrapper[4943]: I1129 07:18:50.974200 4943 scope.go:117] "RemoveContainer" containerID="7a32de18e5615145f0c31b1d2ae3110319af8d1f6916b90ab0c68f221c990089" Nov 29 07:18:51 crc kubenswrapper[4943]: I1129 07:18:51.017992 4943 scope.go:117] "RemoveContainer" containerID="bd3e80ccfa3c12715c355eb49afd4ecd5a2f1ce180109cbbb104bb02b4cd5f0c" Nov 29 07:18:51 crc kubenswrapper[4943]: I1129 07:18:51.045946 4943 scope.go:117] "RemoveContainer" containerID="199414f3d820fc66a2dd9b8ac3304ebe754a84e7836748578081aefa1fcb1c2f" Nov 29 07:18:56 crc kubenswrapper[4943]: I1129 07:18:56.327702 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:18:56 crc kubenswrapper[4943]: E1129 07:18:56.328424 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:19:11 crc kubenswrapper[4943]: I1129 07:19:11.330911 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d" Nov 29 07:19:11 crc kubenswrapper[4943]: I1129 07:19:11.817665 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"1cc8be61dbd9a577187fe2be49ff68ff23612b0fc224b5644b1fb190c4a851c6"} Nov 29 07:19:45 crc kubenswrapper[4943]: I1129 07:19:45.056365 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-mv6c9"] Nov 29 07:19:45 crc kubenswrapper[4943]: I1129 07:19:45.065545 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-mv6c9"] Nov 29 07:19:45 crc kubenswrapper[4943]: I1129 07:19:45.356293 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3660253d-7204-4091-8cb2-589517e751e7" path="/var/lib/kubelet/pods/3660253d-7204-4091-8cb2-589517e751e7/volumes" Nov 29 07:19:51 crc kubenswrapper[4943]: I1129 07:19:51.292914 4943 scope.go:117] "RemoveContainer" containerID="17a43dedef1afa255466b91a097655f04ac7c357e2551c023d070c28c8719c35" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.627805 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5kcvl"] Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.630629 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.645774 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5kcvl"] Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.730125 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-catalog-content\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.730267 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-utilities\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.730378 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp4r6\" (UniqueName: \"kubernetes.io/projected/f6429499-0a1d-4205-84a0-a3bb2c125290-kube-api-access-pp4r6\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.832052 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-utilities\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.832195 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp4r6\" (UniqueName: \"kubernetes.io/projected/f6429499-0a1d-4205-84a0-a3bb2c125290-kube-api-access-pp4r6\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.832513 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-utilities\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.833004 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-catalog-content\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.833187 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-catalog-content\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.850892 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pp4r6\" (UniqueName: \"kubernetes.io/projected/f6429499-0a1d-4205-84a0-a3bb2c125290-kube-api-access-pp4r6\") pod \"redhat-operators-5kcvl\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:41 crc kubenswrapper[4943]: I1129 07:20:41.961077 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:42 crc kubenswrapper[4943]: I1129 07:20:42.408261 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5kcvl"] Nov 29 07:20:42 crc kubenswrapper[4943]: I1129 07:20:42.682280 4943 generic.go:334] "Generic (PLEG): container finished" podID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerID="4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934" exitCode=0 Nov 29 07:20:42 crc kubenswrapper[4943]: I1129 07:20:42.682328 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kcvl" event={"ID":"f6429499-0a1d-4205-84a0-a3bb2c125290","Type":"ContainerDied","Data":"4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934"} Nov 29 07:20:42 crc kubenswrapper[4943]: I1129 07:20:42.682670 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kcvl" event={"ID":"f6429499-0a1d-4205-84a0-a3bb2c125290","Type":"ContainerStarted","Data":"f4a24d81db4c7a17b653899ba283336ee96c9b2691687dd8aa6db5b138644a4d"} Nov 29 07:20:42 crc kubenswrapper[4943]: I1129 07:20:42.684078 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:20:44 crc kubenswrapper[4943]: I1129 07:20:44.734052 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kcvl" event={"ID":"f6429499-0a1d-4205-84a0-a3bb2c125290","Type":"ContainerStarted","Data":"10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87"} Nov 29 07:20:45 crc kubenswrapper[4943]: I1129 07:20:45.748264 4943 generic.go:334] "Generic (PLEG): container finished" podID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerID="10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87" exitCode=0 Nov 29 07:20:45 crc kubenswrapper[4943]: I1129 07:20:45.748376 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kcvl" event={"ID":"f6429499-0a1d-4205-84a0-a3bb2c125290","Type":"ContainerDied","Data":"10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87"} Nov 29 07:20:48 crc kubenswrapper[4943]: I1129 07:20:48.786795 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kcvl" event={"ID":"f6429499-0a1d-4205-84a0-a3bb2c125290","Type":"ContainerStarted","Data":"964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12"} Nov 29 07:20:48 crc kubenswrapper[4943]: I1129 07:20:48.823666 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5kcvl" podStartSLOduration=2.16137454 podStartE2EDuration="7.823550026s" podCreationTimestamp="2025-11-29 07:20:41 +0000 UTC" firstStartedPulling="2025-11-29 07:20:42.683745973 +0000 UTC m=+2817.613834726" lastFinishedPulling="2025-11-29 07:20:48.345921449 +0000 UTC m=+2823.276010212" observedRunningTime="2025-11-29 07:20:48.804990999 +0000 UTC m=+2823.735079812" watchObservedRunningTime="2025-11-29 07:20:48.823550026 +0000 UTC m=+2823.753638799" Nov 29 07:20:51 crc 
kubenswrapper[4943]: I1129 07:20:51.390629 4943 scope.go:117] "RemoveContainer" containerID="b768dd1600aaf9f8eb9d4d89decdb235a5a6c32f57113ea21a9285a483634c9b" Nov 29 07:20:51 crc kubenswrapper[4943]: I1129 07:20:51.421281 4943 scope.go:117] "RemoveContainer" containerID="22c8c91a6c9d2f661d6e79990c109a6792384289ec91138cb365bb54195de1f4" Nov 29 07:20:51 crc kubenswrapper[4943]: I1129 07:20:51.961204 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:51 crc kubenswrapper[4943]: I1129 07:20:51.961867 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:20:53 crc kubenswrapper[4943]: I1129 07:20:53.022183 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5kcvl" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="registry-server" probeResult="failure" output=< Nov 29 07:20:53 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 07:20:53 crc kubenswrapper[4943]: > Nov 29 07:21:02 crc kubenswrapper[4943]: I1129 07:21:02.013525 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:21:02 crc kubenswrapper[4943]: I1129 07:21:02.066601 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:21:03 crc kubenswrapper[4943]: I1129 07:21:03.149008 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5kcvl"] Nov 29 07:21:03 crc kubenswrapper[4943]: I1129 07:21:03.933491 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5kcvl" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="registry-server" containerID="cri-o://964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12" gracePeriod=2 Nov 29 07:21:04 crc kubenswrapper[4943]: I1129 07:21:04.932198 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:21:04 crc kubenswrapper[4943]: I1129 07:21:04.945775 4943 generic.go:334] "Generic (PLEG): container finished" podID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerID="964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12" exitCode=0 Nov 29 07:21:04 crc kubenswrapper[4943]: I1129 07:21:04.945812 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kcvl" event={"ID":"f6429499-0a1d-4205-84a0-a3bb2c125290","Type":"ContainerDied","Data":"964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12"} Nov 29 07:21:04 crc kubenswrapper[4943]: I1129 07:21:04.945837 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5kcvl" event={"ID":"f6429499-0a1d-4205-84a0-a3bb2c125290","Type":"ContainerDied","Data":"f4a24d81db4c7a17b653899ba283336ee96c9b2691687dd8aa6db5b138644a4d"} Nov 29 07:21:04 crc kubenswrapper[4943]: I1129 07:21:04.945854 4943 scope.go:117] "RemoveContainer" containerID="964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12" Nov 29 07:21:04 crc kubenswrapper[4943]: I1129 07:21:04.945908 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5kcvl" Nov 29 07:21:04 crc kubenswrapper[4943]: I1129 07:21:04.971502 4943 scope.go:117] "RemoveContainer" containerID="10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87" Nov 29 07:21:04 crc kubenswrapper[4943]: I1129 07:21:04.994321 4943 scope.go:117] "RemoveContainer" containerID="4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.016198 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pp4r6\" (UniqueName: \"kubernetes.io/projected/f6429499-0a1d-4205-84a0-a3bb2c125290-kube-api-access-pp4r6\") pod \"f6429499-0a1d-4205-84a0-a3bb2c125290\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.016338 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-catalog-content\") pod \"f6429499-0a1d-4205-84a0-a3bb2c125290\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.016433 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-utilities\") pod \"f6429499-0a1d-4205-84a0-a3bb2c125290\" (UID: \"f6429499-0a1d-4205-84a0-a3bb2c125290\") " Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.017602 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-utilities" (OuterVolumeSpecName: "utilities") pod "f6429499-0a1d-4205-84a0-a3bb2c125290" (UID: "f6429499-0a1d-4205-84a0-a3bb2c125290"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.023883 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6429499-0a1d-4205-84a0-a3bb2c125290-kube-api-access-pp4r6" (OuterVolumeSpecName: "kube-api-access-pp4r6") pod "f6429499-0a1d-4205-84a0-a3bb2c125290" (UID: "f6429499-0a1d-4205-84a0-a3bb2c125290"). InnerVolumeSpecName "kube-api-access-pp4r6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.030999 4943 scope.go:117] "RemoveContainer" containerID="964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12" Nov 29 07:21:05 crc kubenswrapper[4943]: E1129 07:21:05.031468 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12\": container with ID starting with 964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12 not found: ID does not exist" containerID="964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.031858 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12"} err="failed to get container status \"964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12\": rpc error: code = NotFound desc = could not find container \"964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12\": container with ID starting with 964038687afa2e572593349a6efe139ce05d25b9ca2d0a6d4a9fc74198bd7e12 not found: ID does not exist" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.031953 4943 scope.go:117] "RemoveContainer" containerID="10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87" Nov 29 07:21:05 crc kubenswrapper[4943]: E1129 07:21:05.032338 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87\": container with ID starting with 10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87 not found: ID does not exist" containerID="10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.032435 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87"} err="failed to get container status \"10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87\": rpc error: code = NotFound desc = could not find container \"10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87\": container with ID starting with 10729178882c2a477f0cd5a35759f49bd211e074ba1d89728be5f0999e784c87 not found: ID does not exist" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.032496 4943 scope.go:117] "RemoveContainer" containerID="4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934" Nov 29 07:21:05 crc kubenswrapper[4943]: E1129 07:21:05.033007 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934\": container with ID starting with 4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934 not found: ID does not exist" containerID="4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.033038 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934"} err="failed to get container status \"4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934\": rpc error: code = NotFound desc = could not 
find container \"4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934\": container with ID starting with 4433d513265e6e502e98ef147ad9a605aca85373425cf9f1d8661aaae6e81934 not found: ID does not exist" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.120317 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.120356 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pp4r6\" (UniqueName: \"kubernetes.io/projected/f6429499-0a1d-4205-84a0-a3bb2c125290-kube-api-access-pp4r6\") on node \"crc\" DevicePath \"\"" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.124691 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f6429499-0a1d-4205-84a0-a3bb2c125290" (UID: "f6429499-0a1d-4205-84a0-a3bb2c125290"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.222043 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6429499-0a1d-4205-84a0-a3bb2c125290-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.295602 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5kcvl"] Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.307283 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5kcvl"] Nov 29 07:21:05 crc kubenswrapper[4943]: I1129 07:21:05.344390 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" path="/var/lib/kubelet/pods/f6429499-0a1d-4205-84a0-a3bb2c125290/volumes" Nov 29 07:21:08 crc kubenswrapper[4943]: I1129 07:21:08.986885 4943 generic.go:334] "Generic (PLEG): container finished" podID="a93b5d7a-0946-4c3c-990d-e8988279b13a" containerID="c4236c3b987e967329a28a6256282ba8992591609610af4bcd34c4e1a40deeae" exitCode=0 Nov 29 07:21:08 crc kubenswrapper[4943]: I1129 07:21:08.987089 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" event={"ID":"a93b5d7a-0946-4c3c-990d-e8988279b13a","Type":"ContainerDied","Data":"c4236c3b987e967329a28a6256282ba8992591609610af4bcd34c4e1a40deeae"} Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.550192 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.732965 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-inventory\") pod \"a93b5d7a-0946-4c3c-990d-e8988279b13a\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.733280 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-ssh-key\") pod \"a93b5d7a-0946-4c3c-990d-e8988279b13a\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.733370 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czl9f\" (UniqueName: \"kubernetes.io/projected/a93b5d7a-0946-4c3c-990d-e8988279b13a-kube-api-access-czl9f\") pod \"a93b5d7a-0946-4c3c-990d-e8988279b13a\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.733465 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-bootstrap-combined-ca-bundle\") pod \"a93b5d7a-0946-4c3c-990d-e8988279b13a\" (UID: \"a93b5d7a-0946-4c3c-990d-e8988279b13a\") " Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.738264 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a93b5d7a-0946-4c3c-990d-e8988279b13a" (UID: "a93b5d7a-0946-4c3c-990d-e8988279b13a"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.739937 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a93b5d7a-0946-4c3c-990d-e8988279b13a-kube-api-access-czl9f" (OuterVolumeSpecName: "kube-api-access-czl9f") pod "a93b5d7a-0946-4c3c-990d-e8988279b13a" (UID: "a93b5d7a-0946-4c3c-990d-e8988279b13a"). InnerVolumeSpecName "kube-api-access-czl9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.775478 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-inventory" (OuterVolumeSpecName: "inventory") pod "a93b5d7a-0946-4c3c-990d-e8988279b13a" (UID: "a93b5d7a-0946-4c3c-990d-e8988279b13a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.778213 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a93b5d7a-0946-4c3c-990d-e8988279b13a" (UID: "a93b5d7a-0946-4c3c-990d-e8988279b13a"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.835275 4943 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.835317 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.835331 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a93b5d7a-0946-4c3c-990d-e8988279b13a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:21:10 crc kubenswrapper[4943]: I1129 07:21:10.835347 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czl9f\" (UniqueName: \"kubernetes.io/projected/a93b5d7a-0946-4c3c-990d-e8988279b13a-kube-api-access-czl9f\") on node \"crc\" DevicePath \"\"" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.019172 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" event={"ID":"a93b5d7a-0946-4c3c-990d-e8988279b13a","Type":"ContainerDied","Data":"58ecd56ed4a7238a9c7328e6ad926ca5552a553ba21b9c0561077ec6f290310d"} Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.019233 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58ecd56ed4a7238a9c7328e6ad926ca5552a553ba21b9c0561077ec6f290310d" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.019269 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.122837 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg"] Nov 29 07:21:11 crc kubenswrapper[4943]: E1129 07:21:11.123246 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="extract-content" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.123267 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="extract-content" Nov 29 07:21:11 crc kubenswrapper[4943]: E1129 07:21:11.123282 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="registry-server" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.123291 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="registry-server" Nov 29 07:21:11 crc kubenswrapper[4943]: E1129 07:21:11.123319 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="extract-utilities" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.123327 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="extract-utilities" Nov 29 07:21:11 crc kubenswrapper[4943]: E1129 07:21:11.123340 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a93b5d7a-0946-4c3c-990d-e8988279b13a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.123351 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="a93b5d7a-0946-4c3c-990d-e8988279b13a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.123620 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6429499-0a1d-4205-84a0-a3bb2c125290" containerName="registry-server" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.123650 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="a93b5d7a-0946-4c3c-990d-e8988279b13a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.124362 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.128249 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.128424 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.128715 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.128288 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.132835 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg"] Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.241784 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zfxk\" (UniqueName: \"kubernetes.io/projected/1fb5fb95-b94e-4452-af1b-15cf34d847bf-kube-api-access-4zfxk\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.242127 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.242332 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.344294 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zfxk\" (UniqueName: \"kubernetes.io/projected/1fb5fb95-b94e-4452-af1b-15cf34d847bf-kube-api-access-4zfxk\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.344430 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.344682 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-inventory\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.349906 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.351098 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.365017 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zfxk\" (UniqueName: \"kubernetes.io/projected/1fb5fb95-b94e-4452-af1b-15cf34d847bf-kube-api-access-4zfxk\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:11 crc kubenswrapper[4943]: I1129 07:21:11.449190 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:21:12 crc kubenswrapper[4943]: I1129 07:21:12.044534 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg"] Nov 29 07:21:13 crc kubenswrapper[4943]: I1129 07:21:13.041283 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" event={"ID":"1fb5fb95-b94e-4452-af1b-15cf34d847bf","Type":"ContainerStarted","Data":"09f0938c186caecd239871f9621219410f58fb69fc9c99dfa21440e0249fdfd0"} Nov 29 07:21:14 crc kubenswrapper[4943]: I1129 07:21:14.052814 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" event={"ID":"1fb5fb95-b94e-4452-af1b-15cf34d847bf","Type":"ContainerStarted","Data":"0acd3887c38c8ce166238bd104d3f76f44a100b975ec75724fdb916b5bf5ff7a"} Nov 29 07:21:14 crc kubenswrapper[4943]: I1129 07:21:14.091511 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" podStartSLOduration=2.069081305 podStartE2EDuration="3.0914912s" podCreationTimestamp="2025-11-29 07:21:11 +0000 UTC" firstStartedPulling="2025-11-29 07:21:12.040388245 +0000 UTC m=+2846.970477008" lastFinishedPulling="2025-11-29 07:21:13.06279815 +0000 UTC m=+2847.992886903" observedRunningTime="2025-11-29 07:21:14.077400983 +0000 UTC m=+2849.007489746" watchObservedRunningTime="2025-11-29 07:21:14.0914912 +0000 UTC m=+2849.021579963" Nov 29 07:21:32 crc kubenswrapper[4943]: I1129 07:21:32.613195 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
Nov 29 07:21:32 crc kubenswrapper[4943]: I1129 07:21:32.614172 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 07:22:02 crc kubenswrapper[4943]: I1129 07:22:02.613219 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 07:22:02 crc kubenswrapper[4943]: I1129 07:22:02.613911 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 07:22:18 crc kubenswrapper[4943]: I1129 07:22:18.053446 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-54gvp"]
Nov 29 07:22:18 crc kubenswrapper[4943]: I1129 07:22:18.065209 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-54gvp"]
Nov 29 07:22:19 crc kubenswrapper[4943]: I1129 07:22:19.027986 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-14c7-account-create-update-qxjwt"]
Nov 29 07:22:19 crc kubenswrapper[4943]: I1129 07:22:19.038137 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-14c7-account-create-update-qxjwt"]
Nov 29 07:22:19 crc kubenswrapper[4943]: I1129 07:22:19.339278 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="119259aa-fc0c-4ed0-bcd8-402b1016bea6" path="/var/lib/kubelet/pods/119259aa-fc0c-4ed0-bcd8-402b1016bea6/volumes"
Nov 29 07:22:19 crc kubenswrapper[4943]: I1129 07:22:19.340650 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f108cd69-36fb-406c-9038-c690b15eaa26" path="/var/lib/kubelet/pods/f108cd69-36fb-406c-9038-c690b15eaa26/volumes"
Nov 29 07:22:22 crc kubenswrapper[4943]: I1129 07:22:22.025528 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-mkjph"]
Nov 29 07:22:22 crc kubenswrapper[4943]: I1129 07:22:22.033963 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-jrkxk"]
Nov 29 07:22:22 crc kubenswrapper[4943]: I1129 07:22:22.043297 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-mkjph"]
Nov 29 07:22:22 crc kubenswrapper[4943]: I1129 07:22:22.051129 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-jrkxk"]
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.029978 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-195a-account-create-update-cthml"]
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.045111 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-99dmd"]
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.056364 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-0721-account-create-update-v4z8w"]
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.063212 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-195a-account-create-update-cthml"]
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.070421 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-0721-account-create-update-v4z8w"]
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.077613 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-99dmd"]
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.338098 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2136019f-116e-4d59-8187-ddffe47807b5" path="/var/lib/kubelet/pods/2136019f-116e-4d59-8187-ddffe47807b5/volumes"
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.339164 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3efb75d6-355a-4ec4-9032-1d23890dcf5e" path="/var/lib/kubelet/pods/3efb75d6-355a-4ec4-9032-1d23890dcf5e/volumes"
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.339836 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="923cc7a6-0fd5-44c0-a568-88eeccc8f31e" path="/var/lib/kubelet/pods/923cc7a6-0fd5-44c0-a568-88eeccc8f31e/volumes"
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.341788 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b338485-63e2-4d8b-bb25-3b37aa40d138" path="/var/lib/kubelet/pods/9b338485-63e2-4d8b-bb25-3b37aa40d138/volumes"
Nov 29 07:22:23 crc kubenswrapper[4943]: I1129 07:22:23.342516 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a28df764-ae07-442a-8786-6941476ba033" path="/var/lib/kubelet/pods/a28df764-ae07-442a-8786-6941476ba033/volumes"
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.613680 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.614117 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.614163 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7"
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.614780 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1cc8be61dbd9a577187fe2be49ff68ff23612b0fc224b5644b1fb190c4a851c6"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.614906 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://1cc8be61dbd9a577187fe2be49ff68ff23612b0fc224b5644b1fb190c4a851c6" gracePeriod=600
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.924587 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="1cc8be61dbd9a577187fe2be49ff68ff23612b0fc224b5644b1fb190c4a851c6" exitCode=0
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.924623 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"1cc8be61dbd9a577187fe2be49ff68ff23612b0fc224b5644b1fb190c4a851c6"}
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.924670 4943 scope.go:117] "RemoveContainer" containerID="f62aacbc50fb4b57dcf85518216e3a729d4c236793404e301e7c405b4307586d"
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.926498 4943 generic.go:334] "Generic (PLEG): container finished" podID="1fb5fb95-b94e-4452-af1b-15cf34d847bf" containerID="0acd3887c38c8ce166238bd104d3f76f44a100b975ec75724fdb916b5bf5ff7a" exitCode=0
Nov 29 07:22:32 crc kubenswrapper[4943]: I1129 07:22:32.926532 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" event={"ID":"1fb5fb95-b94e-4452-af1b-15cf34d847bf","Type":"ContainerDied","Data":"0acd3887c38c8ce166238bd104d3f76f44a100b975ec75724fdb916b5bf5ff7a"}
Nov 29 07:22:33 crc kubenswrapper[4943]: I1129 07:22:33.946117 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb"}
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.056666 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-dnmc7"]
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.063196 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-dnmc7"]
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.376992 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg"
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.534846 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-ssh-key\") pod \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") "
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.535167 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zfxk\" (UniqueName: \"kubernetes.io/projected/1fb5fb95-b94e-4452-af1b-15cf34d847bf-kube-api-access-4zfxk\") pod \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") "
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.535199 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-inventory\") pod \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\" (UID: \"1fb5fb95-b94e-4452-af1b-15cf34d847bf\") "
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.553330 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fb5fb95-b94e-4452-af1b-15cf34d847bf-kube-api-access-4zfxk" (OuterVolumeSpecName: "kube-api-access-4zfxk") pod "1fb5fb95-b94e-4452-af1b-15cf34d847bf" (UID: "1fb5fb95-b94e-4452-af1b-15cf34d847bf"). InnerVolumeSpecName "kube-api-access-4zfxk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.563471 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1fb5fb95-b94e-4452-af1b-15cf34d847bf" (UID: "1fb5fb95-b94e-4452-af1b-15cf34d847bf"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.565722 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-inventory" (OuterVolumeSpecName: "inventory") pod "1fb5fb95-b94e-4452-af1b-15cf34d847bf" (UID: "1fb5fb95-b94e-4452-af1b-15cf34d847bf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.638113 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.638172 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zfxk\" (UniqueName: \"kubernetes.io/projected/1fb5fb95-b94e-4452-af1b-15cf34d847bf-kube-api-access-4zfxk\") on node \"crc\" DevicePath \"\""
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.638195 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fb5fb95-b94e-4452-af1b-15cf34d847bf-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.959084 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg"
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.959082 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg" event={"ID":"1fb5fb95-b94e-4452-af1b-15cf34d847bf","Type":"ContainerDied","Data":"09f0938c186caecd239871f9621219410f58fb69fc9c99dfa21440e0249fdfd0"} Nov 29 07:22:34 crc kubenswrapper[4943]: I1129 07:22:34.960657 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09f0938c186caecd239871f9621219410f58fb69fc9c99dfa21440e0249fdfd0" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.040444 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24"] Nov 29 07:22:35 crc kubenswrapper[4943]: E1129 07:22:35.041025 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fb5fb95-b94e-4452-af1b-15cf34d847bf" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.041092 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fb5fb95-b94e-4452-af1b-15cf34d847bf" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.041308 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fb5fb95-b94e-4452-af1b-15cf34d847bf" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.041917 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.049121 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.049121 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.049134 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.051876 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24"] Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.054994 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.146932 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.147037 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " 
pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.147100 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffpgx\" (UniqueName: \"kubernetes.io/projected/36406046-f137-495b-9a13-cc3e0342f7da-kube-api-access-ffpgx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.249251 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.249465 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.249636 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffpgx\" (UniqueName: \"kubernetes.io/projected/36406046-f137-495b-9a13-cc3e0342f7da-kube-api-access-ffpgx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.256833 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.263644 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.270617 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffpgx\" (UniqueName: \"kubernetes.io/projected/36406046-f137-495b-9a13-cc3e0342f7da-kube-api-access-ffpgx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9xt24\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.343389 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00a997b6-77fe-4644-8034-6a35b7518421" path="/var/lib/kubelet/pods/00a997b6-77fe-4644-8034-6a35b7518421/volumes" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.358473 4943 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.922935 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24"] Nov 29 07:22:35 crc kubenswrapper[4943]: W1129 07:22:35.926333 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36406046_f137_495b_9a13_cc3e0342f7da.slice/crio-b750950eee1fa14ab3a1c5b236109131f2c02df91f06343527e1f1dc4527179d WatchSource:0}: Error finding container b750950eee1fa14ab3a1c5b236109131f2c02df91f06343527e1f1dc4527179d: Status 404 returned error can't find the container with id b750950eee1fa14ab3a1c5b236109131f2c02df91f06343527e1f1dc4527179d Nov 29 07:22:35 crc kubenswrapper[4943]: I1129 07:22:35.968959 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" event={"ID":"36406046-f137-495b-9a13-cc3e0342f7da","Type":"ContainerStarted","Data":"b750950eee1fa14ab3a1c5b236109131f2c02df91f06343527e1f1dc4527179d"} Nov 29 07:22:36 crc kubenswrapper[4943]: I1129 07:22:36.979124 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" event={"ID":"36406046-f137-495b-9a13-cc3e0342f7da","Type":"ContainerStarted","Data":"6e591dfd109d8c800e4c069a5ed0a4a156db79e7bfd904edd2a2350ba0ef5521"} Nov 29 07:22:37 crc kubenswrapper[4943]: I1129 07:22:37.004218 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" podStartSLOduration=1.5420253179999999 podStartE2EDuration="2.004199043s" podCreationTimestamp="2025-11-29 07:22:35 +0000 UTC" firstStartedPulling="2025-11-29 07:22:35.929768935 +0000 UTC m=+2930.859857698" lastFinishedPulling="2025-11-29 07:22:36.39194265 +0000 UTC m=+2931.322031423" observedRunningTime="2025-11-29 07:22:36.995274175 +0000 UTC m=+2931.925362948" watchObservedRunningTime="2025-11-29 07:22:37.004199043 +0000 UTC m=+2931.934287796" Nov 29 07:22:42 crc kubenswrapper[4943]: I1129 07:22:42.028169 4943 generic.go:334] "Generic (PLEG): container finished" podID="36406046-f137-495b-9a13-cc3e0342f7da" containerID="6e591dfd109d8c800e4c069a5ed0a4a156db79e7bfd904edd2a2350ba0ef5521" exitCode=0 Nov 29 07:22:42 crc kubenswrapper[4943]: I1129 07:22:42.028255 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" event={"ID":"36406046-f137-495b-9a13-cc3e0342f7da","Type":"ContainerDied","Data":"6e591dfd109d8c800e4c069a5ed0a4a156db79e7bfd904edd2a2350ba0ef5521"} Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.509326 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.627846 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-inventory\") pod \"36406046-f137-495b-9a13-cc3e0342f7da\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.627955 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-ssh-key\") pod \"36406046-f137-495b-9a13-cc3e0342f7da\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.627981 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffpgx\" (UniqueName: \"kubernetes.io/projected/36406046-f137-495b-9a13-cc3e0342f7da-kube-api-access-ffpgx\") pod \"36406046-f137-495b-9a13-cc3e0342f7da\" (UID: \"36406046-f137-495b-9a13-cc3e0342f7da\") " Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.636935 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36406046-f137-495b-9a13-cc3e0342f7da-kube-api-access-ffpgx" (OuterVolumeSpecName: "kube-api-access-ffpgx") pod "36406046-f137-495b-9a13-cc3e0342f7da" (UID: "36406046-f137-495b-9a13-cc3e0342f7da"). InnerVolumeSpecName "kube-api-access-ffpgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.658512 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-inventory" (OuterVolumeSpecName: "inventory") pod "36406046-f137-495b-9a13-cc3e0342f7da" (UID: "36406046-f137-495b-9a13-cc3e0342f7da"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.680026 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "36406046-f137-495b-9a13-cc3e0342f7da" (UID: "36406046-f137-495b-9a13-cc3e0342f7da"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.732938 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.732986 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/36406046-f137-495b-9a13-cc3e0342f7da-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:22:43 crc kubenswrapper[4943]: I1129 07:22:43.733005 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffpgx\" (UniqueName: \"kubernetes.io/projected/36406046-f137-495b-9a13-cc3e0342f7da-kube-api-access-ffpgx\") on node \"crc\" DevicePath \"\"" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.071347 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" event={"ID":"36406046-f137-495b-9a13-cc3e0342f7da","Type":"ContainerDied","Data":"b750950eee1fa14ab3a1c5b236109131f2c02df91f06343527e1f1dc4527179d"} Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.071397 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b750950eee1fa14ab3a1c5b236109131f2c02df91f06343527e1f1dc4527179d" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.071416 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.141344 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw"] Nov 29 07:22:44 crc kubenswrapper[4943]: E1129 07:22:44.141768 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36406046-f137-495b-9a13-cc3e0342f7da" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.141791 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="36406046-f137-495b-9a13-cc3e0342f7da" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.141964 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="36406046-f137-495b-9a13-cc3e0342f7da" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.142617 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.144711 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.145005 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.145660 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.155977 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.160997 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw"] Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.242842 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.242925 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzpxd\" (UniqueName: \"kubernetes.io/projected/84ef80f6-d172-490b-838b-7454e9254fc8-kube-api-access-fzpxd\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.242982 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.344435 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzpxd\" (UniqueName: \"kubernetes.io/projected/84ef80f6-d172-490b-838b-7454e9254fc8-kube-api-access-fzpxd\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.344587 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.344902 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: 
\"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.348398 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.348410 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.361317 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzpxd\" (UniqueName: \"kubernetes.io/projected/84ef80f6-d172-490b-838b-7454e9254fc8-kube-api-access-fzpxd\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f4kgw\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.514583 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:22:44 crc kubenswrapper[4943]: I1129 07:22:44.877763 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw"] Nov 29 07:22:45 crc kubenswrapper[4943]: I1129 07:22:45.086326 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" event={"ID":"84ef80f6-d172-490b-838b-7454e9254fc8","Type":"ContainerStarted","Data":"479d559f87f67c3d64e829e41198cb7a165c7380de101996242d52b54755250d"} Nov 29 07:22:45 crc kubenswrapper[4943]: I1129 07:22:45.366136 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:22:46 crc kubenswrapper[4943]: I1129 07:22:46.099936 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" event={"ID":"84ef80f6-d172-490b-838b-7454e9254fc8","Type":"ContainerStarted","Data":"15e9e615c48035d3d9ed6a5d3058d764f5132006784e756b90ee48d6902de49d"} Nov 29 07:22:46 crc kubenswrapper[4943]: I1129 07:22:46.133603 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" podStartSLOduration=1.654946898 podStartE2EDuration="2.133582705s" podCreationTimestamp="2025-11-29 07:22:44 +0000 UTC" firstStartedPulling="2025-11-29 07:22:44.883821203 +0000 UTC m=+2939.813909956" lastFinishedPulling="2025-11-29 07:22:45.362457 +0000 UTC m=+2940.292545763" observedRunningTime="2025-11-29 07:22:46.120725201 +0000 UTC m=+2941.050813964" watchObservedRunningTime="2025-11-29 07:22:46.133582705 +0000 UTC m=+2941.063671468" Nov 29 07:22:51 crc kubenswrapper[4943]: I1129 07:22:51.529164 4943 scope.go:117] "RemoveContainer" containerID="a1cdb08a04fe78ea7a9ec26fdf44580ba70fd66c8040c2f7e07a655e73668168" Nov 29 07:22:51 crc kubenswrapper[4943]: I1129 07:22:51.575139 4943 scope.go:117] 
"RemoveContainer" containerID="e0024c11bcf356bf3e610c3006da72054bc3ff482fcaeb5e291233a461671ebd" Nov 29 07:22:51 crc kubenswrapper[4943]: I1129 07:22:51.611261 4943 scope.go:117] "RemoveContainer" containerID="f95ea8857947a88346a5be477d9d7b70a6366fe3bc89a75539ce4779500a970f" Nov 29 07:22:51 crc kubenswrapper[4943]: I1129 07:22:51.650249 4943 scope.go:117] "RemoveContainer" containerID="9649aee915343f4875d096547dbec206b902f42010dd20035ba3fadbdc2c0268" Nov 29 07:22:51 crc kubenswrapper[4943]: I1129 07:22:51.683335 4943 scope.go:117] "RemoveContainer" containerID="6f6c601d2ad15d893b0eb2bcf658e32f1935e1ae81528cf262bc71375b1b5908" Nov 29 07:22:51 crc kubenswrapper[4943]: I1129 07:22:51.761526 4943 scope.go:117] "RemoveContainer" containerID="c9ab090e2256ac95cabac563f93a67ff5fbbc32bb33194b7a5adc6641f6a83bf" Nov 29 07:22:51 crc kubenswrapper[4943]: I1129 07:22:51.781954 4943 scope.go:117] "RemoveContainer" containerID="2dc7af98f42cb30a1c28f09495252b8eaa53c2cfad023af9d90ac47c523d7bdf" Nov 29 07:22:51 crc kubenswrapper[4943]: I1129 07:22:51.803582 4943 scope.go:117] "RemoveContainer" containerID="f2c96dca5fa6b3d1c87c32374f169e4bf3a0d16922f1714d254a2d13e981f232" Nov 29 07:23:28 crc kubenswrapper[4943]: I1129 07:23:28.071749 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-m6jf9"] Nov 29 07:23:28 crc kubenswrapper[4943]: I1129 07:23:28.081847 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-m6jf9"] Nov 29 07:23:28 crc kubenswrapper[4943]: I1129 07:23:28.517042 4943 generic.go:334] "Generic (PLEG): container finished" podID="84ef80f6-d172-490b-838b-7454e9254fc8" containerID="15e9e615c48035d3d9ed6a5d3058d764f5132006784e756b90ee48d6902de49d" exitCode=0 Nov 29 07:23:28 crc kubenswrapper[4943]: I1129 07:23:28.517097 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" event={"ID":"84ef80f6-d172-490b-838b-7454e9254fc8","Type":"ContainerDied","Data":"15e9e615c48035d3d9ed6a5d3058d764f5132006784e756b90ee48d6902de49d"} Nov 29 07:23:29 crc kubenswrapper[4943]: I1129 07:23:29.345680 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3301c2a-4575-4e54-a396-d31fb9c5e427" path="/var/lib/kubelet/pods/a3301c2a-4575-4e54-a396-d31fb9c5e427/volumes" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.033098 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.160241 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-inventory\") pod \"84ef80f6-d172-490b-838b-7454e9254fc8\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.160385 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzpxd\" (UniqueName: \"kubernetes.io/projected/84ef80f6-d172-490b-838b-7454e9254fc8-kube-api-access-fzpxd\") pod \"84ef80f6-d172-490b-838b-7454e9254fc8\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.160426 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-ssh-key\") pod \"84ef80f6-d172-490b-838b-7454e9254fc8\" (UID: \"84ef80f6-d172-490b-838b-7454e9254fc8\") " Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.167812 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84ef80f6-d172-490b-838b-7454e9254fc8-kube-api-access-fzpxd" (OuterVolumeSpecName: "kube-api-access-fzpxd") pod "84ef80f6-d172-490b-838b-7454e9254fc8" (UID: "84ef80f6-d172-490b-838b-7454e9254fc8"). InnerVolumeSpecName "kube-api-access-fzpxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.185318 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-inventory" (OuterVolumeSpecName: "inventory") pod "84ef80f6-d172-490b-838b-7454e9254fc8" (UID: "84ef80f6-d172-490b-838b-7454e9254fc8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.215852 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "84ef80f6-d172-490b-838b-7454e9254fc8" (UID: "84ef80f6-d172-490b-838b-7454e9254fc8"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.263610 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzpxd\" (UniqueName: \"kubernetes.io/projected/84ef80f6-d172-490b-838b-7454e9254fc8-kube-api-access-fzpxd\") on node \"crc\" DevicePath \"\"" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.263694 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.263721 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ef80f6-d172-490b-838b-7454e9254fc8-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.540410 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" event={"ID":"84ef80f6-d172-490b-838b-7454e9254fc8","Type":"ContainerDied","Data":"479d559f87f67c3d64e829e41198cb7a165c7380de101996242d52b54755250d"} Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.540487 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="479d559f87f67c3d64e829e41198cb7a165c7380de101996242d52b54755250d" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.540506 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.703132 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p"] Nov 29 07:23:30 crc kubenswrapper[4943]: E1129 07:23:30.703543 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ef80f6-d172-490b-838b-7454e9254fc8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.703581 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ef80f6-d172-490b-838b-7454e9254fc8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.703784 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="84ef80f6-d172-490b-838b-7454e9254fc8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.704514 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.708695 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.708934 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.709389 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.711290 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.737106 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p"] Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.773912 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.774023 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.774080 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfn9l\" (UniqueName: \"kubernetes.io/projected/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-kube-api-access-wfn9l\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.875745 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.875932 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.876014 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfn9l\" (UniqueName: \"kubernetes.io/projected/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-kube-api-access-wfn9l\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" 
(UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.882136 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.884627 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:30 crc kubenswrapper[4943]: I1129 07:23:30.895717 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfn9l\" (UniqueName: \"kubernetes.io/projected/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-kube-api-access-wfn9l\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:31 crc kubenswrapper[4943]: I1129 07:23:31.052273 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:31 crc kubenswrapper[4943]: I1129 07:23:31.616212 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p"] Nov 29 07:23:32 crc kubenswrapper[4943]: I1129 07:23:32.564009 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" event={"ID":"72f82e0b-2e5c-47b7-b12c-91fc6d945a43","Type":"ContainerStarted","Data":"c61e03b27d20b20ebe8146dbd4e10bd038fca0e20581642860080b7f43fbdd5e"} Nov 29 07:23:33 crc kubenswrapper[4943]: I1129 07:23:33.578681 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" event={"ID":"72f82e0b-2e5c-47b7-b12c-91fc6d945a43","Type":"ContainerStarted","Data":"da15e87baa1a5a844d39d46866ca3316e641a7656522b8983ac1b1ee332cd19d"} Nov 29 07:23:33 crc kubenswrapper[4943]: I1129 07:23:33.600788 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" podStartSLOduration=2.905483695 podStartE2EDuration="3.600763051s" podCreationTimestamp="2025-11-29 07:23:30 +0000 UTC" firstStartedPulling="2025-11-29 07:23:31.628370096 +0000 UTC m=+2986.558458839" lastFinishedPulling="2025-11-29 07:23:32.323649442 +0000 UTC m=+2987.253738195" observedRunningTime="2025-11-29 07:23:33.596450646 +0000 UTC m=+2988.526539459" watchObservedRunningTime="2025-11-29 07:23:33.600763051 +0000 UTC m=+2988.530851834" Nov 29 07:23:37 crc kubenswrapper[4943]: I1129 07:23:37.619436 4943 generic.go:334] "Generic (PLEG): container finished" podID="72f82e0b-2e5c-47b7-b12c-91fc6d945a43" containerID="da15e87baa1a5a844d39d46866ca3316e641a7656522b8983ac1b1ee332cd19d" exitCode=0 Nov 29 07:23:37 crc kubenswrapper[4943]: I1129 07:23:37.619552 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" event={"ID":"72f82e0b-2e5c-47b7-b12c-91fc6d945a43","Type":"ContainerDied","Data":"da15e87baa1a5a844d39d46866ca3316e641a7656522b8983ac1b1ee332cd19d"} Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.017787 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.130473 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-ssh-key\") pod \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.130638 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-inventory\") pod \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.130746 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfn9l\" (UniqueName: \"kubernetes.io/projected/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-kube-api-access-wfn9l\") pod \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\" (UID: \"72f82e0b-2e5c-47b7-b12c-91fc6d945a43\") " Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.135998 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-kube-api-access-wfn9l" (OuterVolumeSpecName: "kube-api-access-wfn9l") pod "72f82e0b-2e5c-47b7-b12c-91fc6d945a43" (UID: "72f82e0b-2e5c-47b7-b12c-91fc6d945a43"). InnerVolumeSpecName "kube-api-access-wfn9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.162146 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-inventory" (OuterVolumeSpecName: "inventory") pod "72f82e0b-2e5c-47b7-b12c-91fc6d945a43" (UID: "72f82e0b-2e5c-47b7-b12c-91fc6d945a43"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.163846 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "72f82e0b-2e5c-47b7-b12c-91fc6d945a43" (UID: "72f82e0b-2e5c-47b7-b12c-91fc6d945a43"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.234353 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.234420 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfn9l\" (UniqueName: \"kubernetes.io/projected/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-kube-api-access-wfn9l\") on node \"crc\" DevicePath \"\"" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.234448 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72f82e0b-2e5c-47b7-b12c-91fc6d945a43-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.643635 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" event={"ID":"72f82e0b-2e5c-47b7-b12c-91fc6d945a43","Type":"ContainerDied","Data":"c61e03b27d20b20ebe8146dbd4e10bd038fca0e20581642860080b7f43fbdd5e"} Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.643714 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c61e03b27d20b20ebe8146dbd4e10bd038fca0e20581642860080b7f43fbdd5e" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.643736 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.733701 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc"] Nov 29 07:23:39 crc kubenswrapper[4943]: E1129 07:23:39.734133 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72f82e0b-2e5c-47b7-b12c-91fc6d945a43" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.734156 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="72f82e0b-2e5c-47b7-b12c-91fc6d945a43" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.734401 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="72f82e0b-2e5c-47b7-b12c-91fc6d945a43" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.735183 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.738315 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.739064 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.739462 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.740103 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.749051 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc"] Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.846592 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.847010 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kbd5\" (UniqueName: \"kubernetes.io/projected/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-kube-api-access-5kbd5\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.847208 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.949174 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kbd5\" (UniqueName: \"kubernetes.io/projected/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-kube-api-access-5kbd5\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.949301 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.949371 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" 
(UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.957284 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.957345 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:39 crc kubenswrapper[4943]: I1129 07:23:39.970617 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kbd5\" (UniqueName: \"kubernetes.io/projected/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-kube-api-access-5kbd5\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:40 crc kubenswrapper[4943]: I1129 07:23:40.055480 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:23:40 crc kubenswrapper[4943]: I1129 07:23:40.583785 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc"] Nov 29 07:23:40 crc kubenswrapper[4943]: I1129 07:23:40.654821 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" event={"ID":"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5","Type":"ContainerStarted","Data":"5d71f0680f3a0953322953dc3a0edf159ff70d854b4b7bcbdbde094df93643d1"} Nov 29 07:23:41 crc kubenswrapper[4943]: I1129 07:23:41.062086 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-t85kr"] Nov 29 07:23:41 crc kubenswrapper[4943]: I1129 07:23:41.073721 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-t85kr"] Nov 29 07:23:41 crc kubenswrapper[4943]: I1129 07:23:41.359904 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e6b4461-55fa-4092-b3c4-bc414ea16f70" path="/var/lib/kubelet/pods/6e6b4461-55fa-4092-b3c4-bc414ea16f70/volumes" Nov 29 07:23:41 crc kubenswrapper[4943]: I1129 07:23:41.668912 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" event={"ID":"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5","Type":"ContainerStarted","Data":"ce328c135e925e8f845dedffc43d2737a3a4e9f0282fd9af3843384257fab2ac"} Nov 29 07:23:41 crc kubenswrapper[4943]: I1129 07:23:41.707464 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" podStartSLOduration=2.141314766 podStartE2EDuration="2.707446956s" podCreationTimestamp="2025-11-29 07:23:39 +0000 UTC" firstStartedPulling="2025-11-29 07:23:40.591368761 +0000 UTC m=+2995.521457514" lastFinishedPulling="2025-11-29 07:23:41.157500941 +0000 UTC m=+2996.087589704" 
observedRunningTime="2025-11-29 07:23:41.694635754 +0000 UTC m=+2996.624724557" watchObservedRunningTime="2025-11-29 07:23:41.707446956 +0000 UTC m=+2996.637535709" Nov 29 07:23:51 crc kubenswrapper[4943]: I1129 07:23:51.942009 4943 scope.go:117] "RemoveContainer" containerID="f4c31a8fb5196326cd984ff41e04d48ad5a2aebc72c4a42ca33a086806c151fa" Nov 29 07:23:52 crc kubenswrapper[4943]: I1129 07:23:52.014357 4943 scope.go:117] "RemoveContainer" containerID="eccb4dbe92cf56c6596c85432ca0f1425762c93367c44aaa8cd07ab960accd45" Nov 29 07:23:53 crc kubenswrapper[4943]: I1129 07:23:53.045669 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ws7ms"] Nov 29 07:23:53 crc kubenswrapper[4943]: I1129 07:23:53.060684 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ws7ms"] Nov 29 07:23:53 crc kubenswrapper[4943]: I1129 07:23:53.338733 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68a1344d-4cce-4597-894c-f167c42efe84" path="/var/lib/kubelet/pods/68a1344d-4cce-4597-894c-f167c42efe84/volumes" Nov 29 07:23:56 crc kubenswrapper[4943]: I1129 07:23:56.030769 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-6wspw"] Nov 29 07:23:56 crc kubenswrapper[4943]: I1129 07:23:56.044412 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-6wspw"] Nov 29 07:23:57 crc kubenswrapper[4943]: I1129 07:23:57.342469 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="157d6f54-3436-40f4-b2ef-c16933cbbc72" path="/var/lib/kubelet/pods/157d6f54-3436-40f4-b2ef-c16933cbbc72/volumes" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.071919 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xk5q8"] Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.076000 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.126416 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xk5q8"] Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.138329 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-catalog-content\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.138379 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwpnx\" (UniqueName: \"kubernetes.io/projected/e6b11320-92fd-4516-aac0-5a5935d6abba-kube-api-access-lwpnx\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.138468 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-utilities\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.234852 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pqq48"] Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.237038 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.239950 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-catalog-content\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.240017 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwpnx\" (UniqueName: \"kubernetes.io/projected/e6b11320-92fd-4516-aac0-5a5935d6abba-kube-api-access-lwpnx\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.240107 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-utilities\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.240891 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-utilities\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.240888 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-catalog-content\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.251310 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pqq48"] Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.267759 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwpnx\" (UniqueName: \"kubernetes.io/projected/e6b11320-92fd-4516-aac0-5a5935d6abba-kube-api-access-lwpnx\") pod \"certified-operators-xk5q8\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.341380 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-catalog-content\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.341762 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-utilities\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.341877 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd4hg\" (UniqueName: \"kubernetes.io/projected/188ff0b9-aa79-45d1-82a3-b166a7985524-kube-api-access-bd4hg\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.416394 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.444051 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd4hg\" (UniqueName: \"kubernetes.io/projected/188ff0b9-aa79-45d1-82a3-b166a7985524-kube-api-access-bd4hg\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.444206 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-catalog-content\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.444309 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-utilities\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.448603 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-catalog-content\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.448861 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-utilities\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.467791 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd4hg\" (UniqueName: \"kubernetes.io/projected/188ff0b9-aa79-45d1-82a3-b166a7985524-kube-api-access-bd4hg\") pod \"community-operators-pqq48\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.554895 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:18 crc kubenswrapper[4943]: I1129 07:24:18.995257 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xk5q8"] Nov 29 07:24:18 crc kubenswrapper[4943]: W1129 07:24:18.997697 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode6b11320_92fd_4516_aac0_5a5935d6abba.slice/crio-12533329999c15cb4f55898e9e0c19539cf0aba4b5d82f22d437de7225c352df WatchSource:0}: Error finding container 12533329999c15cb4f55898e9e0c19539cf0aba4b5d82f22d437de7225c352df: Status 404 returned error can't find the container with id 12533329999c15cb4f55898e9e0c19539cf0aba4b5d82f22d437de7225c352df Nov 29 07:24:19 crc kubenswrapper[4943]: I1129 07:24:19.070818 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pqq48"] Nov 29 07:24:19 crc kubenswrapper[4943]: I1129 07:24:19.098425 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqq48" event={"ID":"188ff0b9-aa79-45d1-82a3-b166a7985524","Type":"ContainerStarted","Data":"d72cf972522fd385da7fa7a206790e7a3a1aeb6c6b3ea3fe0aeaea9da83f2872"} Nov 29 07:24:19 crc kubenswrapper[4943]: I1129 07:24:19.099724 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk5q8" event={"ID":"e6b11320-92fd-4516-aac0-5a5935d6abba","Type":"ContainerStarted","Data":"12533329999c15cb4f55898e9e0c19539cf0aba4b5d82f22d437de7225c352df"} Nov 29 07:24:20 crc kubenswrapper[4943]: I1129 07:24:20.083086 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-sf2xk"] Nov 29 07:24:20 crc kubenswrapper[4943]: I1129 07:24:20.097680 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-sf2xk"] Nov 29 07:24:20 crc kubenswrapper[4943]: I1129 07:24:20.113276 4943 generic.go:334] "Generic (PLEG): container finished" podID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerID="0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455" exitCode=0 Nov 29 07:24:20 crc kubenswrapper[4943]: I1129 07:24:20.114249 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk5q8" event={"ID":"e6b11320-92fd-4516-aac0-5a5935d6abba","Type":"ContainerDied","Data":"0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455"} Nov 29 07:24:20 crc kubenswrapper[4943]: I1129 07:24:20.117490 4943 generic.go:334] "Generic (PLEG): container finished" podID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerID="bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f" exitCode=0 Nov 29 07:24:20 crc kubenswrapper[4943]: I1129 07:24:20.117529 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqq48" event={"ID":"188ff0b9-aa79-45d1-82a3-b166a7985524","Type":"ContainerDied","Data":"bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f"} Nov 29 07:24:21 crc kubenswrapper[4943]: I1129 07:24:21.032513 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4qtlw"] Nov 29 07:24:21 crc kubenswrapper[4943]: I1129 07:24:21.042808 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4qtlw"] Nov 29 07:24:21 crc kubenswrapper[4943]: I1129 07:24:21.351170 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="b3fda5f5-7547-4aec-b6ce-55b5b7434e86" path="/var/lib/kubelet/pods/b3fda5f5-7547-4aec-b6ce-55b5b7434e86/volumes" Nov 29 07:24:21 crc kubenswrapper[4943]: I1129 07:24:21.352689 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e88bdece-8370-4e3e-9127-a932d4452f5b" path="/var/lib/kubelet/pods/e88bdece-8370-4e3e-9127-a932d4452f5b/volumes" Nov 29 07:24:23 crc kubenswrapper[4943]: I1129 07:24:23.152354 4943 generic.go:334] "Generic (PLEG): container finished" podID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerID="1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7" exitCode=0 Nov 29 07:24:23 crc kubenswrapper[4943]: I1129 07:24:23.153541 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk5q8" event={"ID":"e6b11320-92fd-4516-aac0-5a5935d6abba","Type":"ContainerDied","Data":"1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7"} Nov 29 07:24:23 crc kubenswrapper[4943]: I1129 07:24:23.157870 4943 generic.go:334] "Generic (PLEG): container finished" podID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerID="067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f" exitCode=0 Nov 29 07:24:23 crc kubenswrapper[4943]: I1129 07:24:23.157912 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqq48" event={"ID":"188ff0b9-aa79-45d1-82a3-b166a7985524","Type":"ContainerDied","Data":"067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f"} Nov 29 07:24:24 crc kubenswrapper[4943]: I1129 07:24:24.172193 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk5q8" event={"ID":"e6b11320-92fd-4516-aac0-5a5935d6abba","Type":"ContainerStarted","Data":"c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7"} Nov 29 07:24:24 crc kubenswrapper[4943]: I1129 07:24:24.175213 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqq48" event={"ID":"188ff0b9-aa79-45d1-82a3-b166a7985524","Type":"ContainerStarted","Data":"eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345"} Nov 29 07:24:24 crc kubenswrapper[4943]: I1129 07:24:24.199299 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xk5q8" podStartSLOduration=2.730468462 podStartE2EDuration="6.19928388s" podCreationTimestamp="2025-11-29 07:24:18 +0000 UTC" firstStartedPulling="2025-11-29 07:24:20.116742173 +0000 UTC m=+3035.046830936" lastFinishedPulling="2025-11-29 07:24:23.585557581 +0000 UTC m=+3038.515646354" observedRunningTime="2025-11-29 07:24:24.191697976 +0000 UTC m=+3039.121786769" watchObservedRunningTime="2025-11-29 07:24:24.19928388 +0000 UTC m=+3039.129372633" Nov 29 07:24:24 crc kubenswrapper[4943]: I1129 07:24:24.226348 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pqq48" podStartSLOduration=2.706264002 podStartE2EDuration="6.22633077s" podCreationTimestamp="2025-11-29 07:24:18 +0000 UTC" firstStartedPulling="2025-11-29 07:24:20.119409238 +0000 UTC m=+3035.049498001" lastFinishedPulling="2025-11-29 07:24:23.639476006 +0000 UTC m=+3038.569564769" observedRunningTime="2025-11-29 07:24:24.222285451 +0000 UTC m=+3039.152374264" watchObservedRunningTime="2025-11-29 07:24:24.22633077 +0000 UTC m=+3039.156419523" Nov 29 07:24:28 crc kubenswrapper[4943]: I1129 07:24:28.417356 4943 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:28 crc kubenswrapper[4943]: I1129 07:24:28.418557 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:28 crc kubenswrapper[4943]: I1129 07:24:28.479941 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:28 crc kubenswrapper[4943]: I1129 07:24:28.556555 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:28 crc kubenswrapper[4943]: I1129 07:24:28.556617 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:28 crc kubenswrapper[4943]: I1129 07:24:28.632519 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:29 crc kubenswrapper[4943]: I1129 07:24:29.309828 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:29 crc kubenswrapper[4943]: I1129 07:24:29.322379 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:29 crc kubenswrapper[4943]: I1129 07:24:29.833088 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pqq48"] Nov 29 07:24:31 crc kubenswrapper[4943]: I1129 07:24:31.253554 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pqq48" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerName="registry-server" containerID="cri-o://eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345" gracePeriod=2 Nov 29 07:24:31 crc kubenswrapper[4943]: I1129 07:24:31.626199 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xk5q8"] Nov 29 07:24:31 crc kubenswrapper[4943]: I1129 07:24:31.626456 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xk5q8" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerName="registry-server" containerID="cri-o://c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7" gracePeriod=2 Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.134080 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.146717 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.247138 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-utilities\") pod \"188ff0b9-aa79-45d1-82a3-b166a7985524\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.247312 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bd4hg\" (UniqueName: \"kubernetes.io/projected/188ff0b9-aa79-45d1-82a3-b166a7985524-kube-api-access-bd4hg\") pod \"188ff0b9-aa79-45d1-82a3-b166a7985524\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.247379 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-utilities\") pod \"e6b11320-92fd-4516-aac0-5a5935d6abba\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.247416 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwpnx\" (UniqueName: \"kubernetes.io/projected/e6b11320-92fd-4516-aac0-5a5935d6abba-kube-api-access-lwpnx\") pod \"e6b11320-92fd-4516-aac0-5a5935d6abba\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.247462 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-catalog-content\") pod \"188ff0b9-aa79-45d1-82a3-b166a7985524\" (UID: \"188ff0b9-aa79-45d1-82a3-b166a7985524\") " Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.247526 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-catalog-content\") pod \"e6b11320-92fd-4516-aac0-5a5935d6abba\" (UID: \"e6b11320-92fd-4516-aac0-5a5935d6abba\") " Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.248460 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-utilities" (OuterVolumeSpecName: "utilities") pod "188ff0b9-aa79-45d1-82a3-b166a7985524" (UID: "188ff0b9-aa79-45d1-82a3-b166a7985524"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.256214 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-utilities" (OuterVolumeSpecName: "utilities") pod "e6b11320-92fd-4516-aac0-5a5935d6abba" (UID: "e6b11320-92fd-4516-aac0-5a5935d6abba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.258932 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/188ff0b9-aa79-45d1-82a3-b166a7985524-kube-api-access-bd4hg" (OuterVolumeSpecName: "kube-api-access-bd4hg") pod "188ff0b9-aa79-45d1-82a3-b166a7985524" (UID: "188ff0b9-aa79-45d1-82a3-b166a7985524"). InnerVolumeSpecName "kube-api-access-bd4hg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.267746 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6b11320-92fd-4516-aac0-5a5935d6abba-kube-api-access-lwpnx" (OuterVolumeSpecName: "kube-api-access-lwpnx") pod "e6b11320-92fd-4516-aac0-5a5935d6abba" (UID: "e6b11320-92fd-4516-aac0-5a5935d6abba"). InnerVolumeSpecName "kube-api-access-lwpnx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.334474 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "188ff0b9-aa79-45d1-82a3-b166a7985524" (UID: "188ff0b9-aa79-45d1-82a3-b166a7985524"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.334857 4943 generic.go:334] "Generic (PLEG): container finished" podID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerID="c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7" exitCode=0 Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.334971 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk5q8" event={"ID":"e6b11320-92fd-4516-aac0-5a5935d6abba","Type":"ContainerDied","Data":"c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7"} Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.335010 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk5q8" event={"ID":"e6b11320-92fd-4516-aac0-5a5935d6abba","Type":"ContainerDied","Data":"12533329999c15cb4f55898e9e0c19539cf0aba4b5d82f22d437de7225c352df"} Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.335036 4943 scope.go:117] "RemoveContainer" containerID="c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.335199 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xk5q8" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.355443 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.355469 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwpnx\" (UniqueName: \"kubernetes.io/projected/e6b11320-92fd-4516-aac0-5a5935d6abba-kube-api-access-lwpnx\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.355478 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.355488 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/188ff0b9-aa79-45d1-82a3-b166a7985524-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.355497 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bd4hg\" (UniqueName: \"kubernetes.io/projected/188ff0b9-aa79-45d1-82a3-b166a7985524-kube-api-access-bd4hg\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.366954 4943 generic.go:334] "Generic (PLEG): container finished" podID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerID="eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345" exitCode=0 Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.366997 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqq48" event={"ID":"188ff0b9-aa79-45d1-82a3-b166a7985524","Type":"ContainerDied","Data":"eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345"} Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.367022 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqq48" event={"ID":"188ff0b9-aa79-45d1-82a3-b166a7985524","Type":"ContainerDied","Data":"d72cf972522fd385da7fa7a206790e7a3a1aeb6c6b3ea3fe0aeaea9da83f2872"} Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.367075 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pqq48" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.385670 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e6b11320-92fd-4516-aac0-5a5935d6abba" (UID: "e6b11320-92fd-4516-aac0-5a5935d6abba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.386076 4943 scope.go:117] "RemoveContainer" containerID="1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.414947 4943 scope.go:117] "RemoveContainer" containerID="0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.424725 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pqq48"] Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.431303 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pqq48"] Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.451403 4943 scope.go:117] "RemoveContainer" containerID="c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7" Nov 29 07:24:32 crc kubenswrapper[4943]: E1129 07:24:32.451859 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7\": container with ID starting with c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7 not found: ID does not exist" containerID="c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.451908 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7"} err="failed to get container status \"c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7\": rpc error: code = NotFound desc = could not find container \"c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7\": container with ID starting with c2a2ceef53feecb0f7d51968ed1553627a096f183f95378cf464b3b0b49ba1f7 not found: ID does not exist" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.451940 4943 scope.go:117] "RemoveContainer" containerID="1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7" Nov 29 07:24:32 crc kubenswrapper[4943]: E1129 07:24:32.452494 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7\": container with ID starting with 1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7 not found: ID does not exist" containerID="1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.452536 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7"} err="failed to get container status \"1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7\": rpc error: code = NotFound desc = could not find container \"1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7\": container with ID starting with 1e633fedddba04b83ca9b5c03c9830e5940454eac6d142af22e4a7cc9cebe7b7 not found: ID does not exist" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.452577 4943 scope.go:117] "RemoveContainer" containerID="0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455" Nov 29 07:24:32 crc kubenswrapper[4943]: E1129 07:24:32.452959 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455\": container with ID starting with 0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455 not found: ID does not exist" containerID="0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.452987 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455"} err="failed to get container status \"0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455\": rpc error: code = NotFound desc = could not find container \"0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455\": container with ID starting with 0a1caa9423a21abf8316907a045b5302a069bf22bee388dae527b3da851da455 not found: ID does not exist" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.453003 4943 scope.go:117] "RemoveContainer" containerID="eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.458910 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b11320-92fd-4516-aac0-5a5935d6abba-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.486170 4943 scope.go:117] "RemoveContainer" containerID="067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.506307 4943 scope.go:117] "RemoveContainer" containerID="bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.575193 4943 scope.go:117] "RemoveContainer" containerID="eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345" Nov 29 07:24:32 crc kubenswrapper[4943]: E1129 07:24:32.577144 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345\": container with ID starting with eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345 not found: ID does not exist" containerID="eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.577176 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345"} err="failed to get container status \"eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345\": rpc error: code = NotFound desc = could not find container \"eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345\": container with ID starting with eb4a168920149cd391a3a08a6b07ccc216a05362fa0214bd3d60e632aaf5a345 not found: ID does not exist" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.577197 4943 scope.go:117] "RemoveContainer" containerID="067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f" Nov 29 07:24:32 crc kubenswrapper[4943]: E1129 07:24:32.577720 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f\": container with ID starting with 067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f not found: ID does not exist" 
containerID="067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.577837 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f"} err="failed to get container status \"067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f\": rpc error: code = NotFound desc = could not find container \"067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f\": container with ID starting with 067d352dde7742849fef8e64269d7720de1bc2d97745dc84d7b7f08a14da793f not found: ID does not exist" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.577912 4943 scope.go:117] "RemoveContainer" containerID="bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f" Nov 29 07:24:32 crc kubenswrapper[4943]: E1129 07:24:32.578422 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f\": container with ID starting with bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f not found: ID does not exist" containerID="bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.578451 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f"} err="failed to get container status \"bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f\": rpc error: code = NotFound desc = could not find container \"bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f\": container with ID starting with bca17edaf43fa58cad791f0f00454a2a898b4de21006437857635327bb8ef27f not found: ID does not exist" Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.674507 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xk5q8"] Nov 29 07:24:32 crc kubenswrapper[4943]: I1129 07:24:32.680420 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xk5q8"] Nov 29 07:24:33 crc kubenswrapper[4943]: I1129 07:24:33.350223 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" path="/var/lib/kubelet/pods/188ff0b9-aa79-45d1-82a3-b166a7985524/volumes" Nov 29 07:24:33 crc kubenswrapper[4943]: I1129 07:24:33.352068 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" path="/var/lib/kubelet/pods/e6b11320-92fd-4516-aac0-5a5935d6abba/volumes" Nov 29 07:24:38 crc kubenswrapper[4943]: I1129 07:24:38.436910 4943 generic.go:334] "Generic (PLEG): container finished" podID="49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5" containerID="ce328c135e925e8f845dedffc43d2737a3a4e9f0282fd9af3843384257fab2ac" exitCode=0 Nov 29 07:24:38 crc kubenswrapper[4943]: I1129 07:24:38.437679 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" event={"ID":"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5","Type":"ContainerDied","Data":"ce328c135e925e8f845dedffc43d2737a3a4e9f0282fd9af3843384257fab2ac"} Nov 29 07:24:39 crc kubenswrapper[4943]: I1129 07:24:39.917212 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.019009 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kbd5\" (UniqueName: \"kubernetes.io/projected/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-kube-api-access-5kbd5\") pod \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.019065 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-inventory\") pod \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.019113 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-ssh-key\") pod \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\" (UID: \"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5\") " Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.026086 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-kube-api-access-5kbd5" (OuterVolumeSpecName: "kube-api-access-5kbd5") pod "49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5" (UID: "49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5"). InnerVolumeSpecName "kube-api-access-5kbd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.053541 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5" (UID: "49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.054392 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-inventory" (OuterVolumeSpecName: "inventory") pod "49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5" (UID: "49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.122125 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kbd5\" (UniqueName: \"kubernetes.io/projected/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-kube-api-access-5kbd5\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.122173 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.122192 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.462421 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" event={"ID":"49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5","Type":"ContainerDied","Data":"5d71f0680f3a0953322953dc3a0edf159ff70d854b4b7bcbdbde094df93643d1"} Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.462464 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d71f0680f3a0953322953dc3a0edf159ff70d854b4b7bcbdbde094df93643d1" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.462520 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.585732 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-9sgr6"] Nov 29 07:24:40 crc kubenswrapper[4943]: E1129 07:24:40.586258 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerName="extract-utilities" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586281 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerName="extract-utilities" Nov 29 07:24:40 crc kubenswrapper[4943]: E1129 07:24:40.586299 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerName="registry-server" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586310 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerName="registry-server" Nov 29 07:24:40 crc kubenswrapper[4943]: E1129 07:24:40.586332 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerName="extract-utilities" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586342 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerName="extract-utilities" Nov 29 07:24:40 crc kubenswrapper[4943]: E1129 07:24:40.586350 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerName="extract-content" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586361 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerName="extract-content" Nov 29 07:24:40 crc kubenswrapper[4943]: E1129 07:24:40.586377 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" 
containerName="extract-content" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586385 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerName="extract-content" Nov 29 07:24:40 crc kubenswrapper[4943]: E1129 07:24:40.586406 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerName="registry-server" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586414 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerName="registry-server" Nov 29 07:24:40 crc kubenswrapper[4943]: E1129 07:24:40.586443 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586455 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586718 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586756 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6b11320-92fd-4516-aac0-5a5935d6abba" containerName="registry-server" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.586781 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="188ff0b9-aa79-45d1-82a3-b166a7985524" containerName="registry-server" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.587689 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.590417 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.594974 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.595651 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.595802 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.604351 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-9sgr6"] Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.732333 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.732493 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9xjg\" (UniqueName: \"kubernetes.io/projected/62ec2c98-2d89-4018-8a5f-5f90f892935f-kube-api-access-k9xjg\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.732586 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.834583 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9xjg\" (UniqueName: \"kubernetes.io/projected/62ec2c98-2d89-4018-8a5f-5f90f892935f-kube-api-access-k9xjg\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.834653 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.834727 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc 
kubenswrapper[4943]: I1129 07:24:40.839615 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.840100 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.856101 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9xjg\" (UniqueName: \"kubernetes.io/projected/62ec2c98-2d89-4018-8a5f-5f90f892935f-kube-api-access-k9xjg\") pod \"ssh-known-hosts-edpm-deployment-9sgr6\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:40 crc kubenswrapper[4943]: I1129 07:24:40.912695 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:41 crc kubenswrapper[4943]: I1129 07:24:41.336819 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-9sgr6"] Nov 29 07:24:41 crc kubenswrapper[4943]: I1129 07:24:41.472031 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" event={"ID":"62ec2c98-2d89-4018-8a5f-5f90f892935f","Type":"ContainerStarted","Data":"e3481f65db131fbceaf06b45c1b2de7c8e2e313f5110d02e357c3011512075b1"} Nov 29 07:24:42 crc kubenswrapper[4943]: I1129 07:24:42.485363 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" event={"ID":"62ec2c98-2d89-4018-8a5f-5f90f892935f","Type":"ContainerStarted","Data":"f8c8003642d412e91403563830047cd3040ef94750552ac2d39df78eda6ef06f"} Nov 29 07:24:43 crc kubenswrapper[4943]: I1129 07:24:43.514120 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" podStartSLOduration=2.877488348 podStartE2EDuration="3.514102376s" podCreationTimestamp="2025-11-29 07:24:40 +0000 UTC" firstStartedPulling="2025-11-29 07:24:41.339786208 +0000 UTC m=+3056.269875001" lastFinishedPulling="2025-11-29 07:24:41.976400266 +0000 UTC m=+3056.906489029" observedRunningTime="2025-11-29 07:24:43.509724959 +0000 UTC m=+3058.439813732" watchObservedRunningTime="2025-11-29 07:24:43.514102376 +0000 UTC m=+3058.444191129" Nov 29 07:24:50 crc kubenswrapper[4943]: I1129 07:24:50.563999 4943 generic.go:334] "Generic (PLEG): container finished" podID="62ec2c98-2d89-4018-8a5f-5f90f892935f" containerID="f8c8003642d412e91403563830047cd3040ef94750552ac2d39df78eda6ef06f" exitCode=0 Nov 29 07:24:50 crc kubenswrapper[4943]: I1129 07:24:50.564138 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" event={"ID":"62ec2c98-2d89-4018-8a5f-5f90f892935f","Type":"ContainerDied","Data":"f8c8003642d412e91403563830047cd3040ef94750552ac2d39df78eda6ef06f"} Nov 29 07:24:51 crc kubenswrapper[4943]: I1129 07:24:51.987614 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.052939 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9xjg\" (UniqueName: \"kubernetes.io/projected/62ec2c98-2d89-4018-8a5f-5f90f892935f-kube-api-access-k9xjg\") pod \"62ec2c98-2d89-4018-8a5f-5f90f892935f\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.053293 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-ssh-key-openstack-edpm-ipam\") pod \"62ec2c98-2d89-4018-8a5f-5f90f892935f\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.054787 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-inventory-0\") pod \"62ec2c98-2d89-4018-8a5f-5f90f892935f\" (UID: \"62ec2c98-2d89-4018-8a5f-5f90f892935f\") " Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.058415 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62ec2c98-2d89-4018-8a5f-5f90f892935f-kube-api-access-k9xjg" (OuterVolumeSpecName: "kube-api-access-k9xjg") pod "62ec2c98-2d89-4018-8a5f-5f90f892935f" (UID: "62ec2c98-2d89-4018-8a5f-5f90f892935f"). InnerVolumeSpecName "kube-api-access-k9xjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.108030 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "62ec2c98-2d89-4018-8a5f-5f90f892935f" (UID: "62ec2c98-2d89-4018-8a5f-5f90f892935f"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.109111 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "62ec2c98-2d89-4018-8a5f-5f90f892935f" (UID: "62ec2c98-2d89-4018-8a5f-5f90f892935f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.134300 4943 scope.go:117] "RemoveContainer" containerID="cd47a9ba2912637f0e86177785d721d55cb1fe91a746f124c3b58cb6eab8b3bc" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.158040 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9xjg\" (UniqueName: \"kubernetes.io/projected/62ec2c98-2d89-4018-8a5f-5f90f892935f-kube-api-access-k9xjg\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.158086 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.158103 4943 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/62ec2c98-2d89-4018-8a5f-5f90f892935f-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.214521 4943 scope.go:117] "RemoveContainer" containerID="f31e75c0f1ca180fa5447d8b86062d81137a5c8467e221f4fcd473d25403794a" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.292270 4943 scope.go:117] "RemoveContainer" containerID="391dce06ed9d044c50a78e5dfba711255d7f190bb557f7d231cee40b8757f07c" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.328217 4943 scope.go:117] "RemoveContainer" containerID="4ffeada44816bc9d2238de419bbe56d443a99b305ef92427a376d41d563da45b" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.587982 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" event={"ID":"62ec2c98-2d89-4018-8a5f-5f90f892935f","Type":"ContainerDied","Data":"e3481f65db131fbceaf06b45c1b2de7c8e2e313f5110d02e357c3011512075b1"} Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.588042 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3481f65db131fbceaf06b45c1b2de7c8e2e313f5110d02e357c3011512075b1" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.588045 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-9sgr6" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.779288 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk"] Nov 29 07:24:52 crc kubenswrapper[4943]: E1129 07:24:52.779862 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62ec2c98-2d89-4018-8a5f-5f90f892935f" containerName="ssh-known-hosts-edpm-deployment" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.779891 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="62ec2c98-2d89-4018-8a5f-5f90f892935f" containerName="ssh-known-hosts-edpm-deployment" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.780292 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="62ec2c98-2d89-4018-8a5f-5f90f892935f" containerName="ssh-known-hosts-edpm-deployment" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.781734 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.784668 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.789201 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.789373 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.790006 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.809251 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk"] Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.870513 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.870597 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z45vf\" (UniqueName: \"kubernetes.io/projected/b073e871-7545-48cd-a3d1-73347171b5dc-kube-api-access-z45vf\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.871092 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.973479 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.973909 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.973944 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z45vf\" (UniqueName: \"kubernetes.io/projected/b073e871-7545-48cd-a3d1-73347171b5dc-kube-api-access-z45vf\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.979491 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.979521 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:52 crc kubenswrapper[4943]: I1129 07:24:52.995127 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z45vf\" (UniqueName: \"kubernetes.io/projected/b073e871-7545-48cd-a3d1-73347171b5dc-kube-api-access-z45vf\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dzcfk\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:53 crc kubenswrapper[4943]: I1129 07:24:53.105097 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:24:53 crc kubenswrapper[4943]: I1129 07:24:53.630929 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk"] Nov 29 07:24:54 crc kubenswrapper[4943]: I1129 07:24:54.612122 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" event={"ID":"b073e871-7545-48cd-a3d1-73347171b5dc","Type":"ContainerStarted","Data":"a91b742e3f1ae545f8422401ed388733ba9ca38f9db5120293cb59b649459b9f"} Nov 29 07:24:54 crc kubenswrapper[4943]: I1129 07:24:54.612429 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" event={"ID":"b073e871-7545-48cd-a3d1-73347171b5dc","Type":"ContainerStarted","Data":"d061b74376df7bb6c6a4d838af40a6d0b45576c3127c1463f9999201ffeda84d"} Nov 29 07:24:54 crc kubenswrapper[4943]: I1129 07:24:54.632404 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" podStartSLOduration=2.088279603 podStartE2EDuration="2.632381955s" podCreationTimestamp="2025-11-29 07:24:52 +0000 UTC" firstStartedPulling="2025-11-29 07:24:53.639340771 +0000 UTC m=+3068.569429524" lastFinishedPulling="2025-11-29 07:24:54.183443133 +0000 UTC m=+3069.113531876" observedRunningTime="2025-11-29 07:24:54.632286843 +0000 UTC m=+3069.562375636" watchObservedRunningTime="2025-11-29 07:24:54.632381955 +0000 UTC m=+3069.562470708" Nov 29 07:25:02 crc kubenswrapper[4943]: I1129 07:25:02.613451 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:25:02 crc kubenswrapper[4943]: I1129 07:25:02.614020 4943 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:25:03 crc kubenswrapper[4943]: I1129 07:25:03.704996 4943 generic.go:334] "Generic (PLEG): container finished" podID="b073e871-7545-48cd-a3d1-73347171b5dc" containerID="a91b742e3f1ae545f8422401ed388733ba9ca38f9db5120293cb59b649459b9f" exitCode=0 Nov 29 07:25:03 crc kubenswrapper[4943]: I1129 07:25:03.705080 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" event={"ID":"b073e871-7545-48cd-a3d1-73347171b5dc","Type":"ContainerDied","Data":"a91b742e3f1ae545f8422401ed388733ba9ca38f9db5120293cb59b649459b9f"} Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.048761 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-7dwzg"] Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.062689 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-7dwzg"] Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.170133 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.216728 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z45vf\" (UniqueName: \"kubernetes.io/projected/b073e871-7545-48cd-a3d1-73347171b5dc-kube-api-access-z45vf\") pod \"b073e871-7545-48cd-a3d1-73347171b5dc\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.216781 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-ssh-key\") pod \"b073e871-7545-48cd-a3d1-73347171b5dc\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.216859 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-inventory\") pod \"b073e871-7545-48cd-a3d1-73347171b5dc\" (UID: \"b073e871-7545-48cd-a3d1-73347171b5dc\") " Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.222287 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b073e871-7545-48cd-a3d1-73347171b5dc-kube-api-access-z45vf" (OuterVolumeSpecName: "kube-api-access-z45vf") pod "b073e871-7545-48cd-a3d1-73347171b5dc" (UID: "b073e871-7545-48cd-a3d1-73347171b5dc"). InnerVolumeSpecName "kube-api-access-z45vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.244773 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b073e871-7545-48cd-a3d1-73347171b5dc" (UID: "b073e871-7545-48cd-a3d1-73347171b5dc"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.264456 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-inventory" (OuterVolumeSpecName: "inventory") pod "b073e871-7545-48cd-a3d1-73347171b5dc" (UID: "b073e871-7545-48cd-a3d1-73347171b5dc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.318931 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.319015 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b073e871-7545-48cd-a3d1-73347171b5dc-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.319045 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z45vf\" (UniqueName: \"kubernetes.io/projected/b073e871-7545-48cd-a3d1-73347171b5dc-kube-api-access-z45vf\") on node \"crc\" DevicePath \"\"" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.340117 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de1c9351-d792-4a07-94d1-24b480b1ec3b" path="/var/lib/kubelet/pods/de1c9351-d792-4a07-94d1-24b480b1ec3b/volumes" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.742952 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" event={"ID":"b073e871-7545-48cd-a3d1-73347171b5dc","Type":"ContainerDied","Data":"d061b74376df7bb6c6a4d838af40a6d0b45576c3127c1463f9999201ffeda84d"} Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.743049 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d061b74376df7bb6c6a4d838af40a6d0b45576c3127c1463f9999201ffeda84d" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.743150 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.810070 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv"] Nov 29 07:25:05 crc kubenswrapper[4943]: E1129 07:25:05.810542 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b073e871-7545-48cd-a3d1-73347171b5dc" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.810582 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b073e871-7545-48cd-a3d1-73347171b5dc" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.810822 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b073e871-7545-48cd-a3d1-73347171b5dc" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.811842 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.814272 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.814552 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.814929 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.814998 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.819036 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv"] Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.935468 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.935555 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:05 crc kubenswrapper[4943]: I1129 07:25:05.935968 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rm447\" (UniqueName: \"kubernetes.io/projected/1a4c5e98-1854-4892-985f-00631c232dc5-kube-api-access-rm447\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.038078 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rm447\" (UniqueName: \"kubernetes.io/projected/1a4c5e98-1854-4892-985f-00631c232dc5-kube-api-access-rm447\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.038177 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.038222 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: 
\"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.042258 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.042737 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.055355 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rm447\" (UniqueName: \"kubernetes.io/projected/1a4c5e98-1854-4892-985f-00631c232dc5-kube-api-access-rm447\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.134023 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.680769 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv"] Nov 29 07:25:06 crc kubenswrapper[4943]: I1129 07:25:06.754924 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" event={"ID":"1a4c5e98-1854-4892-985f-00631c232dc5","Type":"ContainerStarted","Data":"400f4e5ff33cda69d835e67c843155af0afa2e59b27b687b5729a12ebf0b6032"} Nov 29 07:25:08 crc kubenswrapper[4943]: I1129 07:25:08.776660 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" event={"ID":"1a4c5e98-1854-4892-985f-00631c232dc5","Type":"ContainerStarted","Data":"0c36c5fad6299299ab0f4709f9d10ff3c251481ab11aa1f023a397a3fba41b79"} Nov 29 07:25:08 crc kubenswrapper[4943]: I1129 07:25:08.796204 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" podStartSLOduration=2.448665422 podStartE2EDuration="3.796184117s" podCreationTimestamp="2025-11-29 07:25:05 +0000 UTC" firstStartedPulling="2025-11-29 07:25:06.688948535 +0000 UTC m=+3081.619037288" lastFinishedPulling="2025-11-29 07:25:08.03646719 +0000 UTC m=+3082.966555983" observedRunningTime="2025-11-29 07:25:08.793991364 +0000 UTC m=+3083.724080177" watchObservedRunningTime="2025-11-29 07:25:08.796184117 +0000 UTC m=+3083.726272880" Nov 29 07:25:18 crc kubenswrapper[4943]: I1129 07:25:18.880187 4943 generic.go:334] "Generic (PLEG): container finished" podID="1a4c5e98-1854-4892-985f-00631c232dc5" containerID="0c36c5fad6299299ab0f4709f9d10ff3c251481ab11aa1f023a397a3fba41b79" exitCode=0 Nov 29 07:25:18 crc kubenswrapper[4943]: I1129 07:25:18.880678 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" 
event={"ID":"1a4c5e98-1854-4892-985f-00631c232dc5","Type":"ContainerDied","Data":"0c36c5fad6299299ab0f4709f9d10ff3c251481ab11aa1f023a397a3fba41b79"} Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.324755 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.511083 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-inventory\") pod \"1a4c5e98-1854-4892-985f-00631c232dc5\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.511238 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rm447\" (UniqueName: \"kubernetes.io/projected/1a4c5e98-1854-4892-985f-00631c232dc5-kube-api-access-rm447\") pod \"1a4c5e98-1854-4892-985f-00631c232dc5\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.511472 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-ssh-key\") pod \"1a4c5e98-1854-4892-985f-00631c232dc5\" (UID: \"1a4c5e98-1854-4892-985f-00631c232dc5\") " Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.522148 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a4c5e98-1854-4892-985f-00631c232dc5-kube-api-access-rm447" (OuterVolumeSpecName: "kube-api-access-rm447") pod "1a4c5e98-1854-4892-985f-00631c232dc5" (UID: "1a4c5e98-1854-4892-985f-00631c232dc5"). InnerVolumeSpecName "kube-api-access-rm447". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.544317 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-inventory" (OuterVolumeSpecName: "inventory") pod "1a4c5e98-1854-4892-985f-00631c232dc5" (UID: "1a4c5e98-1854-4892-985f-00631c232dc5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.559797 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1a4c5e98-1854-4892-985f-00631c232dc5" (UID: "1a4c5e98-1854-4892-985f-00631c232dc5"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.619762 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.619819 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a4c5e98-1854-4892-985f-00631c232dc5-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.619837 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rm447\" (UniqueName: \"kubernetes.io/projected/1a4c5e98-1854-4892-985f-00631c232dc5-kube-api-access-rm447\") on node \"crc\" DevicePath \"\"" Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.895023 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" event={"ID":"1a4c5e98-1854-4892-985f-00631c232dc5","Type":"ContainerDied","Data":"400f4e5ff33cda69d835e67c843155af0afa2e59b27b687b5729a12ebf0b6032"} Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.895063 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="400f4e5ff33cda69d835e67c843155af0afa2e59b27b687b5729a12ebf0b6032" Nov 29 07:25:20 crc kubenswrapper[4943]: I1129 07:25:20.895077 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv" Nov 29 07:25:32 crc kubenswrapper[4943]: I1129 07:25:32.613049 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:25:32 crc kubenswrapper[4943]: I1129 07:25:32.614802 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:25:52 crc kubenswrapper[4943]: I1129 07:25:52.450040 4943 scope.go:117] "RemoveContainer" containerID="1ef5568db0c7bb39afa9411d4dc05a41a7727e31d83acfc551c5be03668d7e46" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.041275 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj66"] Nov 29 07:25:55 crc kubenswrapper[4943]: E1129 07:25:55.042266 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a4c5e98-1854-4892-985f-00631c232dc5" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.042291 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a4c5e98-1854-4892-985f-00631c232dc5" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.042692 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a4c5e98-1854-4892-985f-00631c232dc5" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.045092 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.057640 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj66"] Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.210767 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-utilities\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.210844 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-catalog-content\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.210998 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v2s2\" (UniqueName: \"kubernetes.io/projected/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-kube-api-access-4v2s2\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.313134 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v2s2\" (UniqueName: \"kubernetes.io/projected/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-kube-api-access-4v2s2\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.313249 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-utilities\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.313304 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-catalog-content\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.313860 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-catalog-content\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.314509 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-utilities\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.337185 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4v2s2\" (UniqueName: \"kubernetes.io/projected/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-kube-api-access-4v2s2\") pod \"redhat-marketplace-ptj66\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.379988 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:25:55 crc kubenswrapper[4943]: I1129 07:25:55.907672 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj66"] Nov 29 07:25:56 crc kubenswrapper[4943]: I1129 07:25:56.264913 4943 generic.go:334] "Generic (PLEG): container finished" podID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerID="45fe043388d52cfedec8d2cc4b3e9b5a2c00964a3e40e9b491ca0fa54e8c7ced" exitCode=0 Nov 29 07:25:56 crc kubenswrapper[4943]: I1129 07:25:56.265014 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj66" event={"ID":"dee253c4-9e3d-4714-8abb-cd2c1fe5e357","Type":"ContainerDied","Data":"45fe043388d52cfedec8d2cc4b3e9b5a2c00964a3e40e9b491ca0fa54e8c7ced"} Nov 29 07:25:56 crc kubenswrapper[4943]: I1129 07:25:56.265333 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj66" event={"ID":"dee253c4-9e3d-4714-8abb-cd2c1fe5e357","Type":"ContainerStarted","Data":"df7364c096a9b0bd3cfcde11a0faeb5e8feaaaeae4376a99d2cecf270f742b38"} Nov 29 07:25:56 crc kubenswrapper[4943]: I1129 07:25:56.266845 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:25:58 crc kubenswrapper[4943]: I1129 07:25:58.292462 4943 generic.go:334] "Generic (PLEG): container finished" podID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerID="a0132c4967ab21c9f1a345f25df763744b46d0c5799c0123e044f88311162e94" exitCode=0 Nov 29 07:25:58 crc kubenswrapper[4943]: I1129 07:25:58.292515 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj66" event={"ID":"dee253c4-9e3d-4714-8abb-cd2c1fe5e357","Type":"ContainerDied","Data":"a0132c4967ab21c9f1a345f25df763744b46d0c5799c0123e044f88311162e94"} Nov 29 07:25:59 crc kubenswrapper[4943]: I1129 07:25:59.304062 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj66" event={"ID":"dee253c4-9e3d-4714-8abb-cd2c1fe5e357","Type":"ContainerStarted","Data":"a59aa553ba0142aa78b5eb0112aa591ab967e9b0ed4bb9143cc5f2b616152759"} Nov 29 07:25:59 crc kubenswrapper[4943]: I1129 07:25:59.325911 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ptj66" podStartSLOduration=1.678350471 podStartE2EDuration="4.325890482s" podCreationTimestamp="2025-11-29 07:25:55 +0000 UTC" firstStartedPulling="2025-11-29 07:25:56.266598327 +0000 UTC m=+3131.196687080" lastFinishedPulling="2025-11-29 07:25:58.914138338 +0000 UTC m=+3133.844227091" observedRunningTime="2025-11-29 07:25:59.32414794 +0000 UTC m=+3134.254236723" watchObservedRunningTime="2025-11-29 07:25:59.325890482 +0000 UTC m=+3134.255979245" Nov 29 07:26:02 crc kubenswrapper[4943]: I1129 07:26:02.614040 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" start-of-body= Nov 29 07:26:02 crc kubenswrapper[4943]: I1129 07:26:02.614436 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:26:02 crc kubenswrapper[4943]: I1129 07:26:02.614483 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 07:26:02 crc kubenswrapper[4943]: I1129 07:26:02.615311 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 07:26:02 crc kubenswrapper[4943]: I1129 07:26:02.615385 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" gracePeriod=600 Nov 29 07:26:02 crc kubenswrapper[4943]: E1129 07:26:02.748436 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:26:03 crc kubenswrapper[4943]: I1129 07:26:03.357379 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" exitCode=0 Nov 29 07:26:03 crc kubenswrapper[4943]: I1129 07:26:03.357432 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb"} Nov 29 07:26:03 crc kubenswrapper[4943]: I1129 07:26:03.357468 4943 scope.go:117] "RemoveContainer" containerID="1cc8be61dbd9a577187fe2be49ff68ff23612b0fc224b5644b1fb190c4a851c6" Nov 29 07:26:03 crc kubenswrapper[4943]: I1129 07:26:03.358353 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:26:03 crc kubenswrapper[4943]: E1129 07:26:03.358817 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:26:05 crc kubenswrapper[4943]: I1129 07:26:05.381035 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:26:05 crc kubenswrapper[4943]: I1129 07:26:05.381427 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:26:05 crc kubenswrapper[4943]: I1129 07:26:05.445121 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:26:06 crc kubenswrapper[4943]: I1129 07:26:06.454745 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:26:06 crc kubenswrapper[4943]: I1129 07:26:06.505635 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj66"] Nov 29 07:26:08 crc kubenswrapper[4943]: I1129 07:26:08.406669 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ptj66" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerName="registry-server" containerID="cri-o://a59aa553ba0142aa78b5eb0112aa591ab967e9b0ed4bb9143cc5f2b616152759" gracePeriod=2 Nov 29 07:26:09 crc kubenswrapper[4943]: I1129 07:26:09.417925 4943 generic.go:334] "Generic (PLEG): container finished" podID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerID="a59aa553ba0142aa78b5eb0112aa591ab967e9b0ed4bb9143cc5f2b616152759" exitCode=0 Nov 29 07:26:09 crc kubenswrapper[4943]: I1129 07:26:09.418202 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj66" event={"ID":"dee253c4-9e3d-4714-8abb-cd2c1fe5e357","Type":"ContainerDied","Data":"a59aa553ba0142aa78b5eb0112aa591ab967e9b0ed4bb9143cc5f2b616152759"} Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.205508 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.225767 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-utilities\") pod \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.225811 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v2s2\" (UniqueName: \"kubernetes.io/projected/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-kube-api-access-4v2s2\") pod \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.225927 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-catalog-content\") pod \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\" (UID: \"dee253c4-9e3d-4714-8abb-cd2c1fe5e357\") " Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.229757 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-utilities" (OuterVolumeSpecName: "utilities") pod "dee253c4-9e3d-4714-8abb-cd2c1fe5e357" (UID: "dee253c4-9e3d-4714-8abb-cd2c1fe5e357"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.236112 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-kube-api-access-4v2s2" (OuterVolumeSpecName: "kube-api-access-4v2s2") pod "dee253c4-9e3d-4714-8abb-cd2c1fe5e357" (UID: "dee253c4-9e3d-4714-8abb-cd2c1fe5e357"). InnerVolumeSpecName "kube-api-access-4v2s2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.257398 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dee253c4-9e3d-4714-8abb-cd2c1fe5e357" (UID: "dee253c4-9e3d-4714-8abb-cd2c1fe5e357"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.328182 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.328480 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.328550 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v2s2\" (UniqueName: \"kubernetes.io/projected/dee253c4-9e3d-4714-8abb-cd2c1fe5e357-kube-api-access-4v2s2\") on node \"crc\" DevicePath \"\"" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.429702 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj66" event={"ID":"dee253c4-9e3d-4714-8abb-cd2c1fe5e357","Type":"ContainerDied","Data":"df7364c096a9b0bd3cfcde11a0faeb5e8feaaaeae4376a99d2cecf270f742b38"} Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.429806 4943 scope.go:117] "RemoveContainer" containerID="a59aa553ba0142aa78b5eb0112aa591ab967e9b0ed4bb9143cc5f2b616152759" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.429734 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptj66" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.461933 4943 scope.go:117] "RemoveContainer" containerID="a0132c4967ab21c9f1a345f25df763744b46d0c5799c0123e044f88311162e94" Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.466907 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj66"] Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.474457 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj66"] Nov 29 07:26:10 crc kubenswrapper[4943]: I1129 07:26:10.488052 4943 scope.go:117] "RemoveContainer" containerID="45fe043388d52cfedec8d2cc4b3e9b5a2c00964a3e40e9b491ca0fa54e8c7ced" Nov 29 07:26:11 crc kubenswrapper[4943]: I1129 07:26:11.340509 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" path="/var/lib/kubelet/pods/dee253c4-9e3d-4714-8abb-cd2c1fe5e357/volumes" Nov 29 07:26:14 crc kubenswrapper[4943]: I1129 07:26:14.328039 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:26:14 crc kubenswrapper[4943]: E1129 07:26:14.329104 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:26:25 crc kubenswrapper[4943]: I1129 07:26:25.334341 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:26:25 crc kubenswrapper[4943]: E1129 07:26:25.335237 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:26:39 crc kubenswrapper[4943]: I1129 07:26:39.327639 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:26:39 crc kubenswrapper[4943]: E1129 07:26:39.328511 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:26:50 crc kubenswrapper[4943]: I1129 07:26:50.327648 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:26:50 crc kubenswrapper[4943]: E1129 07:26:50.328492 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:27:04 crc kubenswrapper[4943]: I1129 07:27:04.327621 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:27:04 crc kubenswrapper[4943]: E1129 07:27:04.328297 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:27:17 crc kubenswrapper[4943]: I1129 07:27:17.331799 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:27:17 crc kubenswrapper[4943]: E1129 07:27:17.332722 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:27:28 crc kubenswrapper[4943]: I1129 07:27:28.327201 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:27:28 crc kubenswrapper[4943]: E1129 07:27:28.328120 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:27:43 crc kubenswrapper[4943]: I1129 07:27:43.327900 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:27:43 crc kubenswrapper[4943]: E1129 07:27:43.328940 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:27:58 crc kubenswrapper[4943]: I1129 07:27:58.328216 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:27:58 crc kubenswrapper[4943]: E1129 07:27:58.329587 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" 
podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:28:09 crc kubenswrapper[4943]: I1129 07:28:09.328156 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:28:09 crc kubenswrapper[4943]: E1129 07:28:09.329105 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:28:21 crc kubenswrapper[4943]: I1129 07:28:21.328055 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:28:21 crc kubenswrapper[4943]: E1129 07:28:21.330940 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:28:36 crc kubenswrapper[4943]: I1129 07:28:36.328138 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:28:36 crc kubenswrapper[4943]: E1129 07:28:36.329035 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:28:47 crc kubenswrapper[4943]: I1129 07:28:47.328269 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:28:47 crc kubenswrapper[4943]: E1129 07:28:47.328831 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:29:01 crc kubenswrapper[4943]: I1129 07:29:01.328105 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:29:01 crc kubenswrapper[4943]: E1129 07:29:01.329079 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:29:15 crc kubenswrapper[4943]: I1129 07:29:15.340253 4943 scope.go:117] "RemoveContainer" 
containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:29:15 crc kubenswrapper[4943]: E1129 07:29:15.341699 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:29:28 crc kubenswrapper[4943]: I1129 07:29:28.328448 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:29:28 crc kubenswrapper[4943]: E1129 07:29:28.329800 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:29:39 crc kubenswrapper[4943]: I1129 07:29:39.328236 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:29:39 crc kubenswrapper[4943]: E1129 07:29:39.329162 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:29:52 crc kubenswrapper[4943]: I1129 07:29:52.328918 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:29:52 crc kubenswrapper[4943]: E1129 07:29:52.330058 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.235905 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq"] Nov 29 07:30:00 crc kubenswrapper[4943]: E1129 07:30:00.236976 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerName="registry-server" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.236993 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerName="registry-server" Nov 29 07:30:00 crc kubenswrapper[4943]: E1129 07:30:00.237010 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerName="extract-content" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.237021 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" 
containerName="extract-content" Nov 29 07:30:00 crc kubenswrapper[4943]: E1129 07:30:00.237048 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerName="extract-utilities" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.237055 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerName="extract-utilities" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.237292 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="dee253c4-9e3d-4714-8abb-cd2c1fe5e357" containerName="registry-server" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.238110 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.240703 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.241095 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.258750 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq"] Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.368667 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rssc\" (UniqueName: \"kubernetes.io/projected/e55dbca3-5f0d-4f4c-b518-6258340a394f-kube-api-access-5rssc\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.369014 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e55dbca3-5f0d-4f4c-b518-6258340a394f-secret-volume\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.369219 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e55dbca3-5f0d-4f4c-b518-6258340a394f-config-volume\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.470783 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e55dbca3-5f0d-4f4c-b518-6258340a394f-secret-volume\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.470897 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e55dbca3-5f0d-4f4c-b518-6258340a394f-config-volume\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.470942 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rssc\" (UniqueName: \"kubernetes.io/projected/e55dbca3-5f0d-4f4c-b518-6258340a394f-kube-api-access-5rssc\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.471924 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e55dbca3-5f0d-4f4c-b518-6258340a394f-config-volume\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.479246 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e55dbca3-5f0d-4f4c-b518-6258340a394f-secret-volume\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.492678 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rssc\" (UniqueName: \"kubernetes.io/projected/e55dbca3-5f0d-4f4c-b518-6258340a394f-kube-api-access-5rssc\") pod \"collect-profiles-29406690-vvqfq\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.558284 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:00 crc kubenswrapper[4943]: I1129 07:30:00.993200 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq"] Nov 29 07:30:01 crc kubenswrapper[4943]: I1129 07:30:01.634688 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" event={"ID":"e55dbca3-5f0d-4f4c-b518-6258340a394f","Type":"ContainerStarted","Data":"cc8152106d74ae4f4964fb1f30e1eb99fa9842944c26001caf3cbad68075f41b"} Nov 29 07:30:01 crc kubenswrapper[4943]: I1129 07:30:01.635026 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" event={"ID":"e55dbca3-5f0d-4f4c-b518-6258340a394f","Type":"ContainerStarted","Data":"27f59d29709986c260cb3c59c7ab4220b1db501a544178af1c1e03b21714b3ee"} Nov 29 07:30:01 crc kubenswrapper[4943]: I1129 07:30:01.657340 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" podStartSLOduration=1.6573249049999998 podStartE2EDuration="1.657324905s" podCreationTimestamp="2025-11-29 07:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:30:01.654395583 +0000 UTC m=+3376.584484366" watchObservedRunningTime="2025-11-29 07:30:01.657324905 +0000 UTC m=+3376.587413658" Nov 29 07:30:02 crc kubenswrapper[4943]: I1129 07:30:02.646372 4943 generic.go:334] "Generic (PLEG): container finished" podID="e55dbca3-5f0d-4f4c-b518-6258340a394f" containerID="cc8152106d74ae4f4964fb1f30e1eb99fa9842944c26001caf3cbad68075f41b" exitCode=0 Nov 29 07:30:02 crc kubenswrapper[4943]: I1129 07:30:02.646425 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" event={"ID":"e55dbca3-5f0d-4f4c-b518-6258340a394f","Type":"ContainerDied","Data":"cc8152106d74ae4f4964fb1f30e1eb99fa9842944c26001caf3cbad68075f41b"} Nov 29 07:30:03 crc kubenswrapper[4943]: I1129 07:30:03.327208 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:30:03 crc kubenswrapper[4943]: E1129 07:30:03.327663 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.116522 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.250901 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e55dbca3-5f0d-4f4c-b518-6258340a394f-config-volume\") pod \"e55dbca3-5f0d-4f4c-b518-6258340a394f\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.251275 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e55dbca3-5f0d-4f4c-b518-6258340a394f-secret-volume\") pod \"e55dbca3-5f0d-4f4c-b518-6258340a394f\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.251437 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rssc\" (UniqueName: \"kubernetes.io/projected/e55dbca3-5f0d-4f4c-b518-6258340a394f-kube-api-access-5rssc\") pod \"e55dbca3-5f0d-4f4c-b518-6258340a394f\" (UID: \"e55dbca3-5f0d-4f4c-b518-6258340a394f\") " Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.251936 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e55dbca3-5f0d-4f4c-b518-6258340a394f-config-volume" (OuterVolumeSpecName: "config-volume") pod "e55dbca3-5f0d-4f4c-b518-6258340a394f" (UID: "e55dbca3-5f0d-4f4c-b518-6258340a394f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.257702 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e55dbca3-5f0d-4f4c-b518-6258340a394f-kube-api-access-5rssc" (OuterVolumeSpecName: "kube-api-access-5rssc") pod "e55dbca3-5f0d-4f4c-b518-6258340a394f" (UID: "e55dbca3-5f0d-4f4c-b518-6258340a394f"). InnerVolumeSpecName "kube-api-access-5rssc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.258167 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e55dbca3-5f0d-4f4c-b518-6258340a394f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e55dbca3-5f0d-4f4c-b518-6258340a394f" (UID: "e55dbca3-5f0d-4f4c-b518-6258340a394f"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.353747 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rssc\" (UniqueName: \"kubernetes.io/projected/e55dbca3-5f0d-4f4c-b518-6258340a394f-kube-api-access-5rssc\") on node \"crc\" DevicePath \"\"" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.353827 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e55dbca3-5f0d-4f4c-b518-6258340a394f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.353836 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e55dbca3-5f0d-4f4c-b518-6258340a394f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.670364 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" event={"ID":"e55dbca3-5f0d-4f4c-b518-6258340a394f","Type":"ContainerDied","Data":"27f59d29709986c260cb3c59c7ab4220b1db501a544178af1c1e03b21714b3ee"} Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.670446 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27f59d29709986c260cb3c59c7ab4220b1db501a544178af1c1e03b21714b3ee" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.670528 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq" Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.744826 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"] Nov 29 07:30:04 crc kubenswrapper[4943]: I1129 07:30:04.751451 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406645-9bvng"] Nov 29 07:30:05 crc kubenswrapper[4943]: I1129 07:30:05.343126 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bddbe380-25b7-4d94-a35e-63630fa940f3" path="/var/lib/kubelet/pods/bddbe380-25b7-4d94-a35e-63630fa940f3/volumes" Nov 29 07:30:18 crc kubenswrapper[4943]: I1129 07:30:18.328081 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:30:18 crc kubenswrapper[4943]: E1129 07:30:18.330764 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:30:32 crc kubenswrapper[4943]: I1129 07:30:32.327421 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:30:32 crc kubenswrapper[4943]: E1129 07:30:32.328681 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:30:43 crc kubenswrapper[4943]: I1129 07:30:43.328361 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:30:43 crc kubenswrapper[4943]: E1129 07:30:43.329220 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:30:52 crc kubenswrapper[4943]: I1129 07:30:52.643705 4943 scope.go:117] "RemoveContainer" containerID="4cfaa36f3c04bafcac0d5869505e8063f6f243917a5ea3cca4218d4672b45a06" Nov 29 07:30:55 crc kubenswrapper[4943]: I1129 07:30:55.332985 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:30:55 crc kubenswrapper[4943]: E1129 07:30:55.333591 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:31:07 crc kubenswrapper[4943]: I1129 07:31:07.327863 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:31:09 crc kubenswrapper[4943]: I1129 07:31:09.321694 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"5f513ae3d5801e75b5010f5b7e9d196a35e94b2d26664a48e9b728eeeb94c65e"} Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.284088 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lfv47"] Nov 29 07:31:31 crc kubenswrapper[4943]: E1129 07:31:31.285088 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e55dbca3-5f0d-4f4c-b518-6258340a394f" containerName="collect-profiles" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.285107 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e55dbca3-5f0d-4f4c-b518-6258340a394f" containerName="collect-profiles" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.285364 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="e55dbca3-5f0d-4f4c-b518-6258340a394f" containerName="collect-profiles" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.286954 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.294556 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lfv47"] Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.413633 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-catalog-content\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.414621 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26trj\" (UniqueName: \"kubernetes.io/projected/4e87fbbe-298e-4659-8ce5-a2279a8554dd-kube-api-access-26trj\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.414799 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-utilities\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.516925 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-utilities\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.517047 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-catalog-content\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.517091 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26trj\" (UniqueName: \"kubernetes.io/projected/4e87fbbe-298e-4659-8ce5-a2279a8554dd-kube-api-access-26trj\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.518632 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-utilities\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.518893 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-catalog-content\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.542103 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-26trj\" (UniqueName: \"kubernetes.io/projected/4e87fbbe-298e-4659-8ce5-a2279a8554dd-kube-api-access-26trj\") pod \"redhat-operators-lfv47\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:31 crc kubenswrapper[4943]: I1129 07:31:31.612504 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:32 crc kubenswrapper[4943]: I1129 07:31:32.078003 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lfv47"] Nov 29 07:31:32 crc kubenswrapper[4943]: I1129 07:31:32.539102 4943 generic.go:334] "Generic (PLEG): container finished" podID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerID="658633e13a30e9c51b7064e50f4dc0f4124507d67301f626665af583162542fe" exitCode=0 Nov 29 07:31:32 crc kubenswrapper[4943]: I1129 07:31:32.539206 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfv47" event={"ID":"4e87fbbe-298e-4659-8ce5-a2279a8554dd","Type":"ContainerDied","Data":"658633e13a30e9c51b7064e50f4dc0f4124507d67301f626665af583162542fe"} Nov 29 07:31:32 crc kubenswrapper[4943]: I1129 07:31:32.539396 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfv47" event={"ID":"4e87fbbe-298e-4659-8ce5-a2279a8554dd","Type":"ContainerStarted","Data":"ddfca34c3b3bce9c5a0d3805c075be72d203fd63f21c072a929702248f4c7233"} Nov 29 07:31:32 crc kubenswrapper[4943]: I1129 07:31:32.541105 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:31:33 crc kubenswrapper[4943]: I1129 07:31:33.550383 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfv47" event={"ID":"4e87fbbe-298e-4659-8ce5-a2279a8554dd","Type":"ContainerStarted","Data":"4ddef840aa868c98f32b7ab5122d4da5f3c507a86dfc505f19aa038d99afb1a1"} Nov 29 07:31:34 crc kubenswrapper[4943]: I1129 07:31:34.561624 4943 generic.go:334] "Generic (PLEG): container finished" podID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerID="4ddef840aa868c98f32b7ab5122d4da5f3c507a86dfc505f19aa038d99afb1a1" exitCode=0 Nov 29 07:31:34 crc kubenswrapper[4943]: I1129 07:31:34.561710 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfv47" event={"ID":"4e87fbbe-298e-4659-8ce5-a2279a8554dd","Type":"ContainerDied","Data":"4ddef840aa868c98f32b7ab5122d4da5f3c507a86dfc505f19aa038d99afb1a1"} Nov 29 07:31:35 crc kubenswrapper[4943]: I1129 07:31:35.569990 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfv47" event={"ID":"4e87fbbe-298e-4659-8ce5-a2279a8554dd","Type":"ContainerStarted","Data":"4eade0440e78d9ce59cf986e586d1d16744c26b5a9b989ca7413016e9824948a"} Nov 29 07:31:35 crc kubenswrapper[4943]: I1129 07:31:35.585806 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lfv47" podStartSLOduration=2.103496677 podStartE2EDuration="4.585789821s" podCreationTimestamp="2025-11-29 07:31:31 +0000 UTC" firstStartedPulling="2025-11-29 07:31:32.540859983 +0000 UTC m=+3467.470948736" lastFinishedPulling="2025-11-29 07:31:35.023153127 +0000 UTC m=+3469.953241880" observedRunningTime="2025-11-29 07:31:35.584268714 +0000 UTC m=+3470.514357467" watchObservedRunningTime="2025-11-29 07:31:35.585789821 +0000 UTC m=+3470.515878574" Nov 29 07:31:41 crc 
kubenswrapper[4943]: I1129 07:31:41.613259 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:41 crc kubenswrapper[4943]: I1129 07:31:41.613897 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:41 crc kubenswrapper[4943]: I1129 07:31:41.665272 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:42 crc kubenswrapper[4943]: I1129 07:31:42.682264 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:42 crc kubenswrapper[4943]: I1129 07:31:42.724601 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lfv47"] Nov 29 07:31:44 crc kubenswrapper[4943]: I1129 07:31:44.654726 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lfv47" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerName="registry-server" containerID="cri-o://4eade0440e78d9ce59cf986e586d1d16744c26b5a9b989ca7413016e9824948a" gracePeriod=2 Nov 29 07:31:46 crc kubenswrapper[4943]: I1129 07:31:46.676572 4943 generic.go:334] "Generic (PLEG): container finished" podID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerID="4eade0440e78d9ce59cf986e586d1d16744c26b5a9b989ca7413016e9824948a" exitCode=0 Nov 29 07:31:46 crc kubenswrapper[4943]: I1129 07:31:46.677386 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfv47" event={"ID":"4e87fbbe-298e-4659-8ce5-a2279a8554dd","Type":"ContainerDied","Data":"4eade0440e78d9ce59cf986e586d1d16744c26b5a9b989ca7413016e9824948a"} Nov 29 07:31:46 crc kubenswrapper[4943]: I1129 07:31:46.869298 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.014744 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-utilities\") pod \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.014886 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-catalog-content\") pod \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.014950 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26trj\" (UniqueName: \"kubernetes.io/projected/4e87fbbe-298e-4659-8ce5-a2279a8554dd-kube-api-access-26trj\") pod \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\" (UID: \"4e87fbbe-298e-4659-8ce5-a2279a8554dd\") " Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.016276 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-utilities" (OuterVolumeSpecName: "utilities") pod "4e87fbbe-298e-4659-8ce5-a2279a8554dd" (UID: "4e87fbbe-298e-4659-8ce5-a2279a8554dd"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.021341 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e87fbbe-298e-4659-8ce5-a2279a8554dd-kube-api-access-26trj" (OuterVolumeSpecName: "kube-api-access-26trj") pod "4e87fbbe-298e-4659-8ce5-a2279a8554dd" (UID: "4e87fbbe-298e-4659-8ce5-a2279a8554dd"). InnerVolumeSpecName "kube-api-access-26trj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.117541 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.117638 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26trj\" (UniqueName: \"kubernetes.io/projected/4e87fbbe-298e-4659-8ce5-a2279a8554dd-kube-api-access-26trj\") on node \"crc\" DevicePath \"\"" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.126453 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e87fbbe-298e-4659-8ce5-a2279a8554dd" (UID: "4e87fbbe-298e-4659-8ce5-a2279a8554dd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.219507 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87fbbe-298e-4659-8ce5-a2279a8554dd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.688004 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfv47" event={"ID":"4e87fbbe-298e-4659-8ce5-a2279a8554dd","Type":"ContainerDied","Data":"ddfca34c3b3bce9c5a0d3805c075be72d203fd63f21c072a929702248f4c7233"} Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.688346 4943 scope.go:117] "RemoveContainer" containerID="4eade0440e78d9ce59cf986e586d1d16744c26b5a9b989ca7413016e9824948a" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.688151 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lfv47" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.713456 4943 scope.go:117] "RemoveContainer" containerID="4ddef840aa868c98f32b7ab5122d4da5f3c507a86dfc505f19aa038d99afb1a1" Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.719135 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lfv47"] Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.732673 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lfv47"] Nov 29 07:31:47 crc kubenswrapper[4943]: I1129 07:31:47.754972 4943 scope.go:117] "RemoveContainer" containerID="658633e13a30e9c51b7064e50f4dc0f4124507d67301f626665af583162542fe" Nov 29 07:31:49 crc kubenswrapper[4943]: I1129 07:31:49.341069 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" path="/var/lib/kubelet/pods/4e87fbbe-298e-4659-8ce5-a2279a8554dd/volumes" Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.256449 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.264154 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.273233 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.283244 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.296458 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dzcfk"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.302885 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.308638 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.314445 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.319985 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.325725 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-7vx8j"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.331359 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-9sgr6"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.337152 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wxjwj"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.342635 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-2h89p"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.348496 4943 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-b4tvc"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.354054 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.360025 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-9sgr6"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.366294 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-9rqvg"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.374437 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-f4kgw"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.384674 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9xt24"] Nov 29 07:32:38 crc kubenswrapper[4943]: I1129 07:32:38.393848 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x65dv"] Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.338035 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a4c5e98-1854-4892-985f-00631c232dc5" path="/var/lib/kubelet/pods/1a4c5e98-1854-4892-985f-00631c232dc5/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.339346 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fb5fb95-b94e-4452-af1b-15cf34d847bf" path="/var/lib/kubelet/pods/1fb5fb95-b94e-4452-af1b-15cf34d847bf/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.340016 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e8445cc-bfe6-46ee-bd56-7a051fd994cd" path="/var/lib/kubelet/pods/2e8445cc-bfe6-46ee-bd56-7a051fd994cd/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.340732 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36406046-f137-495b-9a13-cc3e0342f7da" path="/var/lib/kubelet/pods/36406046-f137-495b-9a13-cc3e0342f7da/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.341982 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5" path="/var/lib/kubelet/pods/49dd738a-93f9-4b6a-bc81-a0a5f08f9fa5/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.342658 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62ec2c98-2d89-4018-8a5f-5f90f892935f" path="/var/lib/kubelet/pods/62ec2c98-2d89-4018-8a5f-5f90f892935f/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.343323 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72f82e0b-2e5c-47b7-b12c-91fc6d945a43" path="/var/lib/kubelet/pods/72f82e0b-2e5c-47b7-b12c-91fc6d945a43/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.344426 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84ef80f6-d172-490b-838b-7454e9254fc8" path="/var/lib/kubelet/pods/84ef80f6-d172-490b-838b-7454e9254fc8/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.345065 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a93b5d7a-0946-4c3c-990d-e8988279b13a" path="/var/lib/kubelet/pods/a93b5d7a-0946-4c3c-990d-e8988279b13a/volumes" Nov 29 07:32:39 crc kubenswrapper[4943]: I1129 07:32:39.345705 4943 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b073e871-7545-48cd-a3d1-73347171b5dc" path="/var/lib/kubelet/pods/b073e871-7545-48cd-a3d1-73347171b5dc/volumes" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.528017 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc"] Nov 29 07:32:44 crc kubenswrapper[4943]: E1129 07:32:44.530150 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerName="registry-server" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.530248 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerName="registry-server" Nov 29 07:32:44 crc kubenswrapper[4943]: E1129 07:32:44.530347 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerName="extract-content" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.530417 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerName="extract-content" Nov 29 07:32:44 crc kubenswrapper[4943]: E1129 07:32:44.530508 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerName="extract-utilities" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.530630 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerName="extract-utilities" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.530922 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e87fbbe-298e-4659-8ce5-a2279a8554dd" containerName="registry-server" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.531727 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.535034 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.535693 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.535944 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.536271 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.540851 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.548831 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc"] Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.695590 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7l89\" (UniqueName: \"kubernetes.io/projected/87f41bfa-9923-4bbe-b23c-229a8e7223af-kube-api-access-l7l89\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.695684 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.695729 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.695791 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.695843 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.797384 4943 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.797791 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.797929 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7l89\" (UniqueName: \"kubernetes.io/projected/87f41bfa-9923-4bbe-b23c-229a8e7223af-kube-api-access-l7l89\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.798071 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.798299 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.803614 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.803700 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.803821 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.804579 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.814156 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7l89\" (UniqueName: \"kubernetes.io/projected/87f41bfa-9923-4bbe-b23c-229a8e7223af-kube-api-access-l7l89\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:44 crc kubenswrapper[4943]: I1129 07:32:44.853052 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:32:45 crc kubenswrapper[4943]: I1129 07:32:45.371326 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc"] Nov 29 07:32:45 crc kubenswrapper[4943]: I1129 07:32:45.905221 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:32:46 crc kubenswrapper[4943]: I1129 07:32:46.245244 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" event={"ID":"87f41bfa-9923-4bbe-b23c-229a8e7223af","Type":"ContainerStarted","Data":"aa7777994960ecf9ffa5209a46a643d0fed68296ed649081e613484af522c041"} Nov 29 07:32:46 crc kubenswrapper[4943]: I1129 07:32:46.245296 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" event={"ID":"87f41bfa-9923-4bbe-b23c-229a8e7223af","Type":"ContainerStarted","Data":"6bbe1fe8fbb073a3a8dd0d8342e39de7eff8c1197870496af9b887a43a247e1c"} Nov 29 07:32:46 crc kubenswrapper[4943]: I1129 07:32:46.264448 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" podStartSLOduration=1.7239848740000001 podStartE2EDuration="2.264426464s" podCreationTimestamp="2025-11-29 07:32:44 +0000 UTC" firstStartedPulling="2025-11-29 07:32:45.362366435 +0000 UTC m=+3540.292455188" lastFinishedPulling="2025-11-29 07:32:45.902808025 +0000 UTC m=+3540.832896778" observedRunningTime="2025-11-29 07:32:46.259263458 +0000 UTC m=+3541.189352211" watchObservedRunningTime="2025-11-29 07:32:46.264426464 +0000 UTC m=+3541.194515227" Nov 29 07:32:52 crc kubenswrapper[4943]: I1129 07:32:52.765394 4943 scope.go:117] "RemoveContainer" containerID="da15e87baa1a5a844d39d46866ca3316e641a7656522b8983ac1b1ee332cd19d" Nov 29 07:32:52 crc kubenswrapper[4943]: I1129 07:32:52.809794 4943 scope.go:117] "RemoveContainer" containerID="5f65695c3772c7d2863f6b60632c35b9ba30bcd39d975205bbd1e6c3f061c62c" Nov 29 07:32:52 crc kubenswrapper[4943]: I1129 07:32:52.853873 4943 scope.go:117] "RemoveContainer" containerID="c4236c3b987e967329a28a6256282ba8992591609610af4bcd34c4e1a40deeae" Nov 29 07:32:52 crc kubenswrapper[4943]: I1129 07:32:52.922552 4943 scope.go:117] "RemoveContainer" containerID="ce328c135e925e8f845dedffc43d2737a3a4e9f0282fd9af3843384257fab2ac" Nov 29 07:32:52 crc kubenswrapper[4943]: I1129 07:32:52.986540 4943 scope.go:117] "RemoveContainer" containerID="f8c8003642d412e91403563830047cd3040ef94750552ac2d39df78eda6ef06f" Nov 29 07:32:53 crc 
kubenswrapper[4943]: I1129 07:32:53.024395 4943 scope.go:117] "RemoveContainer" containerID="15e9e615c48035d3d9ed6a5d3058d764f5132006784e756b90ee48d6902de49d" Nov 29 07:32:53 crc kubenswrapper[4943]: I1129 07:32:53.091357 4943 scope.go:117] "RemoveContainer" containerID="6e591dfd109d8c800e4c069a5ed0a4a156db79e7bfd904edd2a2350ba0ef5521" Nov 29 07:32:53 crc kubenswrapper[4943]: I1129 07:32:53.125244 4943 scope.go:117] "RemoveContainer" containerID="0c36c5fad6299299ab0f4709f9d10ff3c251481ab11aa1f023a397a3fba41b79" Nov 29 07:32:53 crc kubenswrapper[4943]: I1129 07:32:53.157649 4943 scope.go:117] "RemoveContainer" containerID="0acd3887c38c8ce166238bd104d3f76f44a100b975ec75724fdb916b5bf5ff7a" Nov 29 07:32:53 crc kubenswrapper[4943]: I1129 07:32:53.199595 4943 scope.go:117] "RemoveContainer" containerID="a91b742e3f1ae545f8422401ed388733ba9ca38f9db5120293cb59b649459b9f" Nov 29 07:33:03 crc kubenswrapper[4943]: I1129 07:33:03.428204 4943 generic.go:334] "Generic (PLEG): container finished" podID="87f41bfa-9923-4bbe-b23c-229a8e7223af" containerID="aa7777994960ecf9ffa5209a46a643d0fed68296ed649081e613484af522c041" exitCode=0 Nov 29 07:33:03 crc kubenswrapper[4943]: I1129 07:33:03.428329 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" event={"ID":"87f41bfa-9923-4bbe-b23c-229a8e7223af","Type":"ContainerDied","Data":"aa7777994960ecf9ffa5209a46a643d0fed68296ed649081e613484af522c041"} Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.853241 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.979238 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-repo-setup-combined-ca-bundle\") pod \"87f41bfa-9923-4bbe-b23c-229a8e7223af\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.979315 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-inventory\") pod \"87f41bfa-9923-4bbe-b23c-229a8e7223af\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.979351 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ceph\") pod \"87f41bfa-9923-4bbe-b23c-229a8e7223af\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.979468 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ssh-key\") pod \"87f41bfa-9923-4bbe-b23c-229a8e7223af\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.979654 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7l89\" (UniqueName: \"kubernetes.io/projected/87f41bfa-9923-4bbe-b23c-229a8e7223af-kube-api-access-l7l89\") pod \"87f41bfa-9923-4bbe-b23c-229a8e7223af\" (UID: \"87f41bfa-9923-4bbe-b23c-229a8e7223af\") " Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.984526 4943 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ceph" (OuterVolumeSpecName: "ceph") pod "87f41bfa-9923-4bbe-b23c-229a8e7223af" (UID: "87f41bfa-9923-4bbe-b23c-229a8e7223af"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.985050 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87f41bfa-9923-4bbe-b23c-229a8e7223af-kube-api-access-l7l89" (OuterVolumeSpecName: "kube-api-access-l7l89") pod "87f41bfa-9923-4bbe-b23c-229a8e7223af" (UID: "87f41bfa-9923-4bbe-b23c-229a8e7223af"). InnerVolumeSpecName "kube-api-access-l7l89". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:33:04 crc kubenswrapper[4943]: I1129 07:33:04.991725 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "87f41bfa-9923-4bbe-b23c-229a8e7223af" (UID: "87f41bfa-9923-4bbe-b23c-229a8e7223af"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.013600 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-inventory" (OuterVolumeSpecName: "inventory") pod "87f41bfa-9923-4bbe-b23c-229a8e7223af" (UID: "87f41bfa-9923-4bbe-b23c-229a8e7223af"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.019073 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "87f41bfa-9923-4bbe-b23c-229a8e7223af" (UID: "87f41bfa-9923-4bbe-b23c-229a8e7223af"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.081489 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7l89\" (UniqueName: \"kubernetes.io/projected/87f41bfa-9923-4bbe-b23c-229a8e7223af-kube-api-access-l7l89\") on node \"crc\" DevicePath \"\"" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.081519 4943 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.081534 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.081545 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.081553 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87f41bfa-9923-4bbe-b23c-229a8e7223af-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.451444 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" event={"ID":"87f41bfa-9923-4bbe-b23c-229a8e7223af","Type":"ContainerDied","Data":"6bbe1fe8fbb073a3a8dd0d8342e39de7eff8c1197870496af9b887a43a247e1c"} Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.451542 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6bbe1fe8fbb073a3a8dd0d8342e39de7eff8c1197870496af9b887a43a247e1c" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.451753 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.597872 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg"] Nov 29 07:33:05 crc kubenswrapper[4943]: E1129 07:33:05.598392 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87f41bfa-9923-4bbe-b23c-229a8e7223af" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.598426 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="87f41bfa-9923-4bbe-b23c-229a8e7223af" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.598760 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="87f41bfa-9923-4bbe-b23c-229a8e7223af" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.599742 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.602219 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.602339 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.602815 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.602883 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.602826 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.614220 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg"] Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.692146 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.692378 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.692407 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.692491 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v58zn\" (UniqueName: \"kubernetes.io/projected/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-kube-api-access-v58zn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.692653 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.795407 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.795501 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.795649 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v58zn\" (UniqueName: \"kubernetes.io/projected/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-kube-api-access-v58zn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.795792 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.795962 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.801119 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.801535 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.802932 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.806236 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-bootstrap-combined-ca-bundle\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.813128 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v58zn\" (UniqueName: \"kubernetes.io/projected/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-kube-api-access-v58zn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:05 crc kubenswrapper[4943]: I1129 07:33:05.924943 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:33:06 crc kubenswrapper[4943]: I1129 07:33:06.512931 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg"] Nov 29 07:33:06 crc kubenswrapper[4943]: W1129 07:33:06.516448 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddcee2b53_5ddc_4a3e_afac_ffe2812f24e4.slice/crio-d519c50b6a47a2653928fb3c6aa92b804b5a4fb3890eb2d9cba02c2e2edad01d WatchSource:0}: Error finding container d519c50b6a47a2653928fb3c6aa92b804b5a4fb3890eb2d9cba02c2e2edad01d: Status 404 returned error can't find the container with id d519c50b6a47a2653928fb3c6aa92b804b5a4fb3890eb2d9cba02c2e2edad01d Nov 29 07:33:07 crc kubenswrapper[4943]: I1129 07:33:07.472641 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" event={"ID":"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4","Type":"ContainerStarted","Data":"d519c50b6a47a2653928fb3c6aa92b804b5a4fb3890eb2d9cba02c2e2edad01d"} Nov 29 07:33:08 crc kubenswrapper[4943]: I1129 07:33:08.481489 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" event={"ID":"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4","Type":"ContainerStarted","Data":"b789d87b45bbfe2166301519662cee674136ce2a9cfc2fe6dfacbb82b2bcf3f7"} Nov 29 07:33:08 crc kubenswrapper[4943]: I1129 07:33:08.521518 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" podStartSLOduration=2.198334064 podStartE2EDuration="3.52149712s" podCreationTimestamp="2025-11-29 07:33:05 +0000 UTC" firstStartedPulling="2025-11-29 07:33:06.51980909 +0000 UTC m=+3561.449897853" lastFinishedPulling="2025-11-29 07:33:07.842972156 +0000 UTC m=+3562.773060909" observedRunningTime="2025-11-29 07:33:08.517333898 +0000 UTC m=+3563.447422761" watchObservedRunningTime="2025-11-29 07:33:08.52149712 +0000 UTC m=+3563.451585893" Nov 29 07:33:32 crc kubenswrapper[4943]: I1129 07:33:32.613658 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:33:32 crc kubenswrapper[4943]: I1129 07:33:32.615750 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:34:02 crc kubenswrapper[4943]: I1129 07:34:02.613742 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:34:02 crc kubenswrapper[4943]: I1129 07:34:02.614503 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:34:32 crc kubenswrapper[4943]: I1129 07:34:32.613800 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:34:32 crc kubenswrapper[4943]: I1129 07:34:32.614404 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:34:32 crc kubenswrapper[4943]: I1129 07:34:32.614463 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 07:34:32 crc kubenswrapper[4943]: I1129 07:34:32.615147 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5f513ae3d5801e75b5010f5b7e9d196a35e94b2d26664a48e9b728eeeb94c65e"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 07:34:32 crc kubenswrapper[4943]: I1129 07:34:32.615216 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://5f513ae3d5801e75b5010f5b7e9d196a35e94b2d26664a48e9b728eeeb94c65e" gracePeriod=600 Nov 29 07:34:32 crc kubenswrapper[4943]: I1129 07:34:32.838345 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="5f513ae3d5801e75b5010f5b7e9d196a35e94b2d26664a48e9b728eeeb94c65e" exitCode=0 Nov 29 07:34:32 crc kubenswrapper[4943]: I1129 07:34:32.838692 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"5f513ae3d5801e75b5010f5b7e9d196a35e94b2d26664a48e9b728eeeb94c65e"} Nov 29 07:34:32 crc kubenswrapper[4943]: I1129 07:34:32.838729 4943 scope.go:117] "RemoveContainer" containerID="0de178a199ed75739d34adb20a0c880f3491d2b716ae43879164d5c57c5501bb" Nov 29 07:34:33 crc kubenswrapper[4943]: I1129 07:34:33.850862 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"} Nov 29 07:34:44 crc kubenswrapper[4943]: I1129 07:34:44.872210 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2wcqx"] Nov 29 07:34:44 crc kubenswrapper[4943]: I1129 07:34:44.874325 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:44 crc kubenswrapper[4943]: I1129 07:34:44.891290 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2wcqx"] Nov 29 07:34:44 crc kubenswrapper[4943]: I1129 07:34:44.968515 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rttg\" (UniqueName: \"kubernetes.io/projected/96366d60-62ed-41f9-8b29-1bfe27f7afd4-kube-api-access-8rttg\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:44 crc kubenswrapper[4943]: I1129 07:34:44.968733 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-utilities\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:44 crc kubenswrapper[4943]: I1129 07:34:44.968776 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-catalog-content\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.068385 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z674n"] Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.070681 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.070730 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rttg\" (UniqueName: \"kubernetes.io/projected/96366d60-62ed-41f9-8b29-1bfe27f7afd4-kube-api-access-8rttg\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.070872 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-catalog-content\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.070905 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-utilities\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.071538 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-catalog-content\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.071598 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-utilities\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.109431 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z674n"] Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.120312 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rttg\" (UniqueName: \"kubernetes.io/projected/96366d60-62ed-41f9-8b29-1bfe27f7afd4-kube-api-access-8rttg\") pod \"certified-operators-2wcqx\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") " pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.172998 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-utilities\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.173351 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb9q8\" (UniqueName: \"kubernetes.io/projected/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-kube-api-access-jb9q8\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.173475 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-catalog-content\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.209574 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.275856 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-utilities\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.276238 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb9q8\" (UniqueName: \"kubernetes.io/projected/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-kube-api-access-jb9q8\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.276371 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-catalog-content\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.277052 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-catalog-content\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.277373 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-utilities\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.301274 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb9q8\" (UniqueName: \"kubernetes.io/projected/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-kube-api-access-jb9q8\") pod \"community-operators-z674n\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") " pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.444147 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.784517 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2wcqx"] Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.988450 4943 generic.go:334] "Generic (PLEG): container finished" podID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerID="2a915e8ca1fd9e360b72dae899dc39e7ece272c39ea4c0800989d4ef94ecbc5b" exitCode=0 Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.988684 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wcqx" event={"ID":"96366d60-62ed-41f9-8b29-1bfe27f7afd4","Type":"ContainerDied","Data":"2a915e8ca1fd9e360b72dae899dc39e7ece272c39ea4c0800989d4ef94ecbc5b"} Nov 29 07:34:45 crc kubenswrapper[4943]: I1129 07:34:45.988798 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wcqx" event={"ID":"96366d60-62ed-41f9-8b29-1bfe27f7afd4","Type":"ContainerStarted","Data":"33bcd10171f7ef4b6865a1321529c59010497aef846f6124a615240d6f555ff2"} Nov 29 07:34:46 crc kubenswrapper[4943]: I1129 07:34:46.037212 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z674n"] Nov 29 07:34:46 crc kubenswrapper[4943]: W1129 07:34:46.042280 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9eb3f41_764b_45e6_83d7_84e6b3733f1e.slice/crio-e32756465d4ff23ac3f7a989d72902e9bebd3823f375af516c3137744ab4594a WatchSource:0}: Error finding container e32756465d4ff23ac3f7a989d72902e9bebd3823f375af516c3137744ab4594a: Status 404 returned error can't find the container with id e32756465d4ff23ac3f7a989d72902e9bebd3823f375af516c3137744ab4594a Nov 29 07:34:47 crc kubenswrapper[4943]: I1129 07:34:47.000110 4943 generic.go:334] "Generic (PLEG): container finished" podID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerID="d276194bb6e62e37f8867489741a86ce79b1bc413e473c93eb5381b953624fd6" exitCode=0 Nov 29 07:34:47 crc kubenswrapper[4943]: I1129 07:34:47.000195 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z674n" event={"ID":"c9eb3f41-764b-45e6-83d7-84e6b3733f1e","Type":"ContainerDied","Data":"d276194bb6e62e37f8867489741a86ce79b1bc413e473c93eb5381b953624fd6"} Nov 29 07:34:47 crc kubenswrapper[4943]: I1129 07:34:47.000894 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z674n" event={"ID":"c9eb3f41-764b-45e6-83d7-84e6b3733f1e","Type":"ContainerStarted","Data":"e32756465d4ff23ac3f7a989d72902e9bebd3823f375af516c3137744ab4594a"} Nov 29 07:34:48 crc kubenswrapper[4943]: I1129 07:34:48.012610 4943 generic.go:334] "Generic (PLEG): container finished" podID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerID="c93d5eaba4cacb198593c1588189bc5219d27fa0c44d2312e45cc3b24fb77e6c" exitCode=0 Nov 29 07:34:48 crc kubenswrapper[4943]: I1129 07:34:48.012728 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wcqx" event={"ID":"96366d60-62ed-41f9-8b29-1bfe27f7afd4","Type":"ContainerDied","Data":"c93d5eaba4cacb198593c1588189bc5219d27fa0c44d2312e45cc3b24fb77e6c"} Nov 29 07:34:49 crc kubenswrapper[4943]: I1129 07:34:49.020331 4943 generic.go:334] "Generic (PLEG): container finished" podID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" 
containerID="d9a28f4aed4d1305cca7908c681d710bed21140a0ac40ab0056bf2a71c1fd9dc" exitCode=0 Nov 29 07:34:49 crc kubenswrapper[4943]: I1129 07:34:49.020461 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z674n" event={"ID":"c9eb3f41-764b-45e6-83d7-84e6b3733f1e","Type":"ContainerDied","Data":"d9a28f4aed4d1305cca7908c681d710bed21140a0ac40ab0056bf2a71c1fd9dc"} Nov 29 07:34:50 crc kubenswrapper[4943]: I1129 07:34:50.031450 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z674n" event={"ID":"c9eb3f41-764b-45e6-83d7-84e6b3733f1e","Type":"ContainerStarted","Data":"86993e5c2e67ef8a3d59c516c905e5cb297b341a9e156d110c472e7f02a27c05"} Nov 29 07:34:50 crc kubenswrapper[4943]: I1129 07:34:50.040532 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wcqx" event={"ID":"96366d60-62ed-41f9-8b29-1bfe27f7afd4","Type":"ContainerStarted","Data":"2c29ed709cb7f27cefa0c417b95089ce2453cb75f396008617f78dfd2d9b407c"} Nov 29 07:34:50 crc kubenswrapper[4943]: I1129 07:34:50.076636 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z674n" podStartSLOduration=2.593029681 podStartE2EDuration="5.076618876s" podCreationTimestamp="2025-11-29 07:34:45 +0000 UTC" firstStartedPulling="2025-11-29 07:34:47.004587584 +0000 UTC m=+3661.934676367" lastFinishedPulling="2025-11-29 07:34:49.488176809 +0000 UTC m=+3664.418265562" observedRunningTime="2025-11-29 07:34:50.050026464 +0000 UTC m=+3664.980115217" watchObservedRunningTime="2025-11-29 07:34:50.076618876 +0000 UTC m=+3665.006707629" Nov 29 07:34:50 crc kubenswrapper[4943]: I1129 07:34:50.078252 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2wcqx" podStartSLOduration=3.246405347 podStartE2EDuration="6.078246565s" podCreationTimestamp="2025-11-29 07:34:44 +0000 UTC" firstStartedPulling="2025-11-29 07:34:45.990001917 +0000 UTC m=+3660.920090670" lastFinishedPulling="2025-11-29 07:34:48.821843135 +0000 UTC m=+3663.751931888" observedRunningTime="2025-11-29 07:34:50.069107961 +0000 UTC m=+3664.999196724" watchObservedRunningTime="2025-11-29 07:34:50.078246565 +0000 UTC m=+3665.008335318" Nov 29 07:34:55 crc kubenswrapper[4943]: I1129 07:34:55.210647 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:55 crc kubenswrapper[4943]: I1129 07:34:55.211216 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:55 crc kubenswrapper[4943]: I1129 07:34:55.253222 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:55 crc kubenswrapper[4943]: I1129 07:34:55.445354 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:55 crc kubenswrapper[4943]: I1129 07:34:55.445427 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:55 crc kubenswrapper[4943]: I1129 07:34:55.495523 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:56 crc kubenswrapper[4943]: I1129 07:34:56.155937 4943 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2wcqx" Nov 29 07:34:56 crc kubenswrapper[4943]: I1129 07:34:56.180698 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z674n" Nov 29 07:34:57 crc kubenswrapper[4943]: I1129 07:34:57.057979 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2wcqx"] Nov 29 07:34:58 crc kubenswrapper[4943]: I1129 07:34:58.134244 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2wcqx" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerName="registry-server" containerID="cri-o://2c29ed709cb7f27cefa0c417b95089ce2453cb75f396008617f78dfd2d9b407c" gracePeriod=2 Nov 29 07:34:58 crc kubenswrapper[4943]: I1129 07:34:58.462946 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z674n"] Nov 29 07:34:58 crc kubenswrapper[4943]: I1129 07:34:58.463189 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z674n" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerName="registry-server" containerID="cri-o://86993e5c2e67ef8a3d59c516c905e5cb297b341a9e156d110c472e7f02a27c05" gracePeriod=2 Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.144612 4943 generic.go:334] "Generic (PLEG): container finished" podID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerID="2c29ed709cb7f27cefa0c417b95089ce2453cb75f396008617f78dfd2d9b407c" exitCode=0 Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.144676 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wcqx" event={"ID":"96366d60-62ed-41f9-8b29-1bfe27f7afd4","Type":"ContainerDied","Data":"2c29ed709cb7f27cefa0c417b95089ce2453cb75f396008617f78dfd2d9b407c"} Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.144940 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wcqx" event={"ID":"96366d60-62ed-41f9-8b29-1bfe27f7afd4","Type":"ContainerDied","Data":"33bcd10171f7ef4b6865a1321529c59010497aef846f6124a615240d6f555ff2"} Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.144959 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33bcd10171f7ef4b6865a1321529c59010497aef846f6124a615240d6f555ff2" Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.149097 4943 generic.go:334] "Generic (PLEG): container finished" podID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerID="86993e5c2e67ef8a3d59c516c905e5cb297b341a9e156d110c472e7f02a27c05" exitCode=0 Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.149129 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z674n" event={"ID":"c9eb3f41-764b-45e6-83d7-84e6b3733f1e","Type":"ContainerDied","Data":"86993e5c2e67ef8a3d59c516c905e5cb297b341a9e156d110c472e7f02a27c05"} Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.178179 4943 util.go:48] "No ready sandbox for pod can be found. 
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.178179 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2wcqx"
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.369961 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-catalog-content\") pod \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") "
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.371851 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-utilities\") pod \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") "
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.372063 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rttg\" (UniqueName: \"kubernetes.io/projected/96366d60-62ed-41f9-8b29-1bfe27f7afd4-kube-api-access-8rttg\") pod \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\" (UID: \"96366d60-62ed-41f9-8b29-1bfe27f7afd4\") "
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.375885 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-utilities" (OuterVolumeSpecName: "utilities") pod "96366d60-62ed-41f9-8b29-1bfe27f7afd4" (UID: "96366d60-62ed-41f9-8b29-1bfe27f7afd4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.408873 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96366d60-62ed-41f9-8b29-1bfe27f7afd4-kube-api-access-8rttg" (OuterVolumeSpecName: "kube-api-access-8rttg") pod "96366d60-62ed-41f9-8b29-1bfe27f7afd4" (UID: "96366d60-62ed-41f9-8b29-1bfe27f7afd4"). InnerVolumeSpecName "kube-api-access-8rttg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.413444 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "96366d60-62ed-41f9-8b29-1bfe27f7afd4" (UID: "96366d60-62ed-41f9-8b29-1bfe27f7afd4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.475352 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rttg\" (UniqueName: \"kubernetes.io/projected/96366d60-62ed-41f9-8b29-1bfe27f7afd4-kube-api-access-8rttg\") on node \"crc\" DevicePath \"\""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.475424 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.475437 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96366d60-62ed-41f9-8b29-1bfe27f7afd4-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.594830 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z674n"
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.780722 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jb9q8\" (UniqueName: \"kubernetes.io/projected/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-kube-api-access-jb9q8\") pod \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") "
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.781024 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-catalog-content\") pod \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") "
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.781109 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-utilities\") pod \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\" (UID: \"c9eb3f41-764b-45e6-83d7-84e6b3733f1e\") "
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.782065 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-utilities" (OuterVolumeSpecName: "utilities") pod "c9eb3f41-764b-45e6-83d7-84e6b3733f1e" (UID: "c9eb3f41-764b-45e6-83d7-84e6b3733f1e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.787289 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-kube-api-access-jb9q8" (OuterVolumeSpecName: "kube-api-access-jb9q8") pod "c9eb3f41-764b-45e6-83d7-84e6b3733f1e" (UID: "c9eb3f41-764b-45e6-83d7-84e6b3733f1e"). InnerVolumeSpecName "kube-api-access-jb9q8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.827994 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9eb3f41-764b-45e6-83d7-84e6b3733f1e" (UID: "c9eb3f41-764b-45e6-83d7-84e6b3733f1e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.883237 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.883273 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jb9q8\" (UniqueName: \"kubernetes.io/projected/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-kube-api-access-jb9q8\") on node \"crc\" DevicePath \"\""
Nov 29 07:34:59 crc kubenswrapper[4943]: I1129 07:34:59.883283 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9eb3f41-764b-45e6-83d7-84e6b3733f1e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.158642 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z674n" event={"ID":"c9eb3f41-764b-45e6-83d7-84e6b3733f1e","Type":"ContainerDied","Data":"e32756465d4ff23ac3f7a989d72902e9bebd3823f375af516c3137744ab4594a"}
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.158701 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z674n"
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.158708 4943 scope.go:117] "RemoveContainer" containerID="86993e5c2e67ef8a3d59c516c905e5cb297b341a9e156d110c472e7f02a27c05"
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.158662 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2wcqx"
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.193173 4943 scope.go:117] "RemoveContainer" containerID="d9a28f4aed4d1305cca7908c681d710bed21140a0ac40ab0056bf2a71c1fd9dc"
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.200458 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z674n"]
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.213762 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z674n"]
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.224476 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2wcqx"]
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.225810 4943 scope.go:117] "RemoveContainer" containerID="d276194bb6e62e37f8867489741a86ce79b1bc413e473c93eb5381b953624fd6"
Nov 29 07:35:00 crc kubenswrapper[4943]: I1129 07:35:00.234583 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2wcqx"]
Nov 29 07:35:01 crc kubenswrapper[4943]: I1129 07:35:01.364192 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" path="/var/lib/kubelet/pods/96366d60-62ed-41f9-8b29-1bfe27f7afd4/volumes"
Nov 29 07:35:01 crc kubenswrapper[4943]: I1129 07:35:01.365660 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" path="/var/lib/kubelet/pods/c9eb3f41-764b-45e6-83d7-84e6b3733f1e/volumes"
containerID="b789d87b45bbfe2166301519662cee674136ce2a9cfc2fe6dfacbb82b2bcf3f7" exitCode=0 Nov 29 07:35:16 crc kubenswrapper[4943]: I1129 07:35:16.294231 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" event={"ID":"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4","Type":"ContainerDied","Data":"b789d87b45bbfe2166301519662cee674136ce2a9cfc2fe6dfacbb82b2bcf3f7"} Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.681682 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.839162 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-bootstrap-combined-ca-bundle\") pod \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.839661 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ssh-key\") pod \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.839785 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v58zn\" (UniqueName: \"kubernetes.io/projected/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-kube-api-access-v58zn\") pod \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.839842 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-inventory\") pod \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.839923 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ceph\") pod \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\" (UID: \"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4\") " Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.845229 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ceph" (OuterVolumeSpecName: "ceph") pod "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4" (UID: "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.846987 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4" (UID: "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.848019 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-kube-api-access-v58zn" (OuterVolumeSpecName: "kube-api-access-v58zn") pod "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4" (UID: "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4"). InnerVolumeSpecName "kube-api-access-v58zn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.866949 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4" (UID: "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.874411 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-inventory" (OuterVolumeSpecName: "inventory") pod "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4" (UID: "dcee2b53-5ddc-4a3e-afac-ffe2812f24e4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.942934 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.942986 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v58zn\" (UniqueName: \"kubernetes.io/projected/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-kube-api-access-v58zn\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.943007 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.943026 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:17 crc kubenswrapper[4943]: I1129 07:35:17.943043 4943 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcee2b53-5ddc-4a3e-afac-ffe2812f24e4-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.312115 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" event={"ID":"dcee2b53-5ddc-4a3e-afac-ffe2812f24e4","Type":"ContainerDied","Data":"d519c50b6a47a2653928fb3c6aa92b804b5a4fb3890eb2d9cba02c2e2edad01d"} Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.312406 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d519c50b6a47a2653928fb3c6aa92b804b5a4fb3890eb2d9cba02c2e2edad01d" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.312175 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408326 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8"] Nov 29 07:35:18 crc kubenswrapper[4943]: E1129 07:35:18.408675 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerName="extract-utilities" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408692 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerName="extract-utilities" Nov 29 07:35:18 crc kubenswrapper[4943]: E1129 07:35:18.408701 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerName="registry-server" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408708 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerName="registry-server" Nov 29 07:35:18 crc kubenswrapper[4943]: E1129 07:35:18.408729 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerName="extract-content" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408735 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerName="extract-content" Nov 29 07:35:18 crc kubenswrapper[4943]: E1129 07:35:18.408745 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcee2b53-5ddc-4a3e-afac-ffe2812f24e4" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408751 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcee2b53-5ddc-4a3e-afac-ffe2812f24e4" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 29 07:35:18 crc kubenswrapper[4943]: E1129 07:35:18.408761 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerName="extract-content" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408767 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerName="extract-content" Nov 29 07:35:18 crc kubenswrapper[4943]: E1129 07:35:18.408773 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerName="registry-server" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408779 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerName="registry-server" Nov 29 07:35:18 crc kubenswrapper[4943]: E1129 07:35:18.408789 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerName="extract-utilities" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408794 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" containerName="extract-utilities" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408939 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcee2b53-5ddc-4a3e-afac-ffe2812f24e4" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408963 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9eb3f41-764b-45e6-83d7-84e6b3733f1e" 
containerName="registry-server" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.408978 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="96366d60-62ed-41f9-8b29-1bfe27f7afd4" containerName="registry-server" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.409544 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.412551 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.412765 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.412942 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.415378 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.417002 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.419683 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8"] Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.554414 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdscd\" (UniqueName: \"kubernetes.io/projected/e3504f2d-9001-4652-8404-4f2ac4265eef-kube-api-access-kdscd\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.554470 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.554805 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.554928 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.656857 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.657215 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.657351 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdscd\" (UniqueName: \"kubernetes.io/projected/e3504f2d-9001-4652-8404-4f2ac4265eef-kube-api-access-kdscd\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.657481 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.661422 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.661508 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.664322 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.679511 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdscd\" (UniqueName: \"kubernetes.io/projected/e3504f2d-9001-4652-8404-4f2ac4265eef-kube-api-access-kdscd\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-486w8\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.730874 4943 util.go:30] "No sandbox for pod can be found. 
Nov 29 07:35:18 crc kubenswrapper[4943]: I1129 07:35:18.730874 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8"
Nov 29 07:35:19 crc kubenswrapper[4943]: I1129 07:35:19.223544 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8"]
Nov 29 07:35:19 crc kubenswrapper[4943]: W1129 07:35:19.235342 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3504f2d_9001_4652_8404_4f2ac4265eef.slice/crio-12e0e56fe8522a23416ccd2f8f871b294af8e9bb43deb90dbc72d31b7a20965d WatchSource:0}: Error finding container 12e0e56fe8522a23416ccd2f8f871b294af8e9bb43deb90dbc72d31b7a20965d: Status 404 returned error can't find the container with id 12e0e56fe8522a23416ccd2f8f871b294af8e9bb43deb90dbc72d31b7a20965d
Nov 29 07:35:19 crc kubenswrapper[4943]: I1129 07:35:19.325726 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" event={"ID":"e3504f2d-9001-4652-8404-4f2ac4265eef","Type":"ContainerStarted","Data":"12e0e56fe8522a23416ccd2f8f871b294af8e9bb43deb90dbc72d31b7a20965d"}
Nov 29 07:35:20 crc kubenswrapper[4943]: I1129 07:35:20.336810 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" event={"ID":"e3504f2d-9001-4652-8404-4f2ac4265eef","Type":"ContainerStarted","Data":"b879ab8db0edf84146cc11efd00d821d27ed90ccd936bd2212c10e31283d6deb"}
Nov 29 07:35:20 crc kubenswrapper[4943]: I1129 07:35:20.393304 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" podStartSLOduration=1.774606404 podStartE2EDuration="2.393276971s" podCreationTimestamp="2025-11-29 07:35:18 +0000 UTC" firstStartedPulling="2025-11-29 07:35:19.237731852 +0000 UTC m=+3694.167820605" lastFinishedPulling="2025-11-29 07:35:19.856402399 +0000 UTC m=+3694.786491172" observedRunningTime="2025-11-29 07:35:20.353828775 +0000 UTC m=+3695.283917608" watchObservedRunningTime="2025-11-29 07:35:20.393276971 +0000 UTC m=+3695.323365734"
Nov 29 07:35:53 crc kubenswrapper[4943]: I1129 07:35:53.661896 4943 generic.go:334] "Generic (PLEG): container finished" podID="e3504f2d-9001-4652-8404-4f2ac4265eef" containerID="b879ab8db0edf84146cc11efd00d821d27ed90ccd936bd2212c10e31283d6deb" exitCode=0
Nov 29 07:35:53 crc kubenswrapper[4943]: I1129 07:35:53.662176 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" event={"ID":"e3504f2d-9001-4652-8404-4f2ac4265eef","Type":"ContainerDied","Data":"b879ab8db0edf84146cc11efd00d821d27ed90ccd936bd2212c10e31283d6deb"}
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.145629 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8"
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.224972 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdscd\" (UniqueName: \"kubernetes.io/projected/e3504f2d-9001-4652-8404-4f2ac4265eef-kube-api-access-kdscd\") pod \"e3504f2d-9001-4652-8404-4f2ac4265eef\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") "
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.225089 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ceph\") pod \"e3504f2d-9001-4652-8404-4f2ac4265eef\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") "
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.225158 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ssh-key\") pod \"e3504f2d-9001-4652-8404-4f2ac4265eef\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") "
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.226480 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-inventory\") pod \"e3504f2d-9001-4652-8404-4f2ac4265eef\" (UID: \"e3504f2d-9001-4652-8404-4f2ac4265eef\") "
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.232848 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ceph" (OuterVolumeSpecName: "ceph") pod "e3504f2d-9001-4652-8404-4f2ac4265eef" (UID: "e3504f2d-9001-4652-8404-4f2ac4265eef"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.232905 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3504f2d-9001-4652-8404-4f2ac4265eef-kube-api-access-kdscd" (OuterVolumeSpecName: "kube-api-access-kdscd") pod "e3504f2d-9001-4652-8404-4f2ac4265eef" (UID: "e3504f2d-9001-4652-8404-4f2ac4265eef"). InnerVolumeSpecName "kube-api-access-kdscd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.252481 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-inventory" (OuterVolumeSpecName: "inventory") pod "e3504f2d-9001-4652-8404-4f2ac4265eef" (UID: "e3504f2d-9001-4652-8404-4f2ac4265eef"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.329840 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdscd\" (UniqueName: \"kubernetes.io/projected/e3504f2d-9001-4652-8404-4f2ac4265eef-kube-api-access-kdscd\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.329901 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.329919 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.329938 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e3504f2d-9001-4652-8404-4f2ac4265eef-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.684259 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" event={"ID":"e3504f2d-9001-4652-8404-4f2ac4265eef","Type":"ContainerDied","Data":"12e0e56fe8522a23416ccd2f8f871b294af8e9bb43deb90dbc72d31b7a20965d"} Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.684302 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12e0e56fe8522a23416ccd2f8f871b294af8e9bb43deb90dbc72d31b7a20965d" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.684345 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-486w8" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.768963 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"] Nov 29 07:35:55 crc kubenswrapper[4943]: E1129 07:35:55.769321 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3504f2d-9001-4652-8404-4f2ac4265eef" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.769339 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3504f2d-9001-4652-8404-4f2ac4265eef" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.769503 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3504f2d-9001-4652-8404-4f2ac4265eef" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.770073 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.772802 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.774227 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.776543 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.777089 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.777432 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.781716 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"] Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.838261 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shl8s\" (UniqueName: \"kubernetes.io/projected/d949007e-fcdf-4a59-a11c-bc2c210c1f58-kube-api-access-shl8s\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.838355 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.838473 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.838510 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.940075 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shl8s\" (UniqueName: \"kubernetes.io/projected/d949007e-fcdf-4a59-a11c-bc2c210c1f58-kube-api-access-shl8s\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.940171 4943 
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.940171 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.940317 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.940351 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.944744 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.944806 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.950480 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:35:55 crc kubenswrapper[4943]: I1129 07:35:55.954981 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shl8s\" (UniqueName: \"kubernetes.io/projected/d949007e-fcdf-4a59-a11c-bc2c210c1f58-kube-api-access-shl8s\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:35:56 crc kubenswrapper[4943]: I1129 07:35:56.132474 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:35:56 crc kubenswrapper[4943]: I1129 07:35:56.639352 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"]
Nov 29 07:35:56 crc kubenswrapper[4943]: I1129 07:35:56.691909 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" event={"ID":"d949007e-fcdf-4a59-a11c-bc2c210c1f58","Type":"ContainerStarted","Data":"8a8c40ee14d4e2042892f40adba862cacc5415f4966ac0333a9c588af066a101"}
Nov 29 07:35:57 crc kubenswrapper[4943]: I1129 07:35:57.701528 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" event={"ID":"d949007e-fcdf-4a59-a11c-bc2c210c1f58","Type":"ContainerStarted","Data":"baa89f132115557d039a73d5ea3cb2a983e2933fc90f82a12b58311bbc463d1e"}
Nov 29 07:35:57 crc kubenswrapper[4943]: I1129 07:35:57.729407 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" podStartSLOduration=2.271656975 podStartE2EDuration="2.729387279s" podCreationTimestamp="2025-11-29 07:35:55 +0000 UTC" firstStartedPulling="2025-11-29 07:35:56.653639735 +0000 UTC m=+3731.583728488" lastFinishedPulling="2025-11-29 07:35:57.111370039 +0000 UTC m=+3732.041458792" observedRunningTime="2025-11-29 07:35:57.723979697 +0000 UTC m=+3732.654068460" watchObservedRunningTime="2025-11-29 07:35:57.729387279 +0000 UTC m=+3732.659476032"
Nov 29 07:36:02 crc kubenswrapper[4943]: I1129 07:36:02.739454 4943 generic.go:334] "Generic (PLEG): container finished" podID="d949007e-fcdf-4a59-a11c-bc2c210c1f58" containerID="baa89f132115557d039a73d5ea3cb2a983e2933fc90f82a12b58311bbc463d1e" exitCode=0
Nov 29 07:36:02 crc kubenswrapper[4943]: I1129 07:36:02.739555 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" event={"ID":"d949007e-fcdf-4a59-a11c-bc2c210c1f58","Type":"ContainerDied","Data":"baa89f132115557d039a73d5ea3cb2a983e2933fc90f82a12b58311bbc463d1e"}
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.124444 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.189237 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ceph\") pod \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") "
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.189301 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ssh-key\") pod \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") "
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.189352 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shl8s\" (UniqueName: \"kubernetes.io/projected/d949007e-fcdf-4a59-a11c-bc2c210c1f58-kube-api-access-shl8s\") pod \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") "
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.189481 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-inventory\") pod \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\" (UID: \"d949007e-fcdf-4a59-a11c-bc2c210c1f58\") "
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.194999 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d949007e-fcdf-4a59-a11c-bc2c210c1f58-kube-api-access-shl8s" (OuterVolumeSpecName: "kube-api-access-shl8s") pod "d949007e-fcdf-4a59-a11c-bc2c210c1f58" (UID: "d949007e-fcdf-4a59-a11c-bc2c210c1f58"). InnerVolumeSpecName "kube-api-access-shl8s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.195529 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ceph" (OuterVolumeSpecName: "ceph") pod "d949007e-fcdf-4a59-a11c-bc2c210c1f58" (UID: "d949007e-fcdf-4a59-a11c-bc2c210c1f58"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.215362 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-inventory" (OuterVolumeSpecName: "inventory") pod "d949007e-fcdf-4a59-a11c-bc2c210c1f58" (UID: "d949007e-fcdf-4a59-a11c-bc2c210c1f58"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.219980 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d949007e-fcdf-4a59-a11c-bc2c210c1f58" (UID: "d949007e-fcdf-4a59-a11c-bc2c210c1f58"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.291240 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ceph\") on node \"crc\" DevicePath \"\""
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.291269 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.291280 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shl8s\" (UniqueName: \"kubernetes.io/projected/d949007e-fcdf-4a59-a11c-bc2c210c1f58-kube-api-access-shl8s\") on node \"crc\" DevicePath \"\""
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.291288 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d949007e-fcdf-4a59-a11c-bc2c210c1f58-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.758681 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2" event={"ID":"d949007e-fcdf-4a59-a11c-bc2c210c1f58","Type":"ContainerDied","Data":"8a8c40ee14d4e2042892f40adba862cacc5415f4966ac0333a9c588af066a101"}
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.758727 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a8c40ee14d4e2042892f40adba862cacc5415f4966ac0333a9c588af066a101"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.758761 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.869708 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"]
Nov 29 07:36:04 crc kubenswrapper[4943]: E1129 07:36:04.870753 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d949007e-fcdf-4a59-a11c-bc2c210c1f58" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.870884 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d949007e-fcdf-4a59-a11c-bc2c210c1f58" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.871190 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d949007e-fcdf-4a59-a11c-bc2c210c1f58" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.871816 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.874081 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.874382 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.874740 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.874758 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.875633 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.946884 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"]
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.948086 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.948144 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.948169 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:04 crc kubenswrapper[4943]: I1129 07:36:04.948281 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zscx8\" (UniqueName: \"kubernetes.io/projected/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-kube-api-access-zscx8\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.049914 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.049974 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.050002 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.050138 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zscx8\" (UniqueName: \"kubernetes.io/projected/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-kube-api-access-zscx8\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.055932 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.055977 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.055943 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.066759 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zscx8\" (UniqueName: \"kubernetes.io/projected/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-kube-api-access-zscx8\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-q7flv\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.248731 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"
Nov 29 07:36:05 crc kubenswrapper[4943]: W1129 07:36:05.754166 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfa047b7_dae4_49f4_9cfd_5e9492f3620f.slice/crio-bf2622a99ce8e27be08af0d3f9a8e982e10d4cb13ebe659cf8609206a45d2dcb WatchSource:0}: Error finding container bf2622a99ce8e27be08af0d3f9a8e982e10d4cb13ebe659cf8609206a45d2dcb: Status 404 returned error can't find the container with id bf2622a99ce8e27be08af0d3f9a8e982e10d4cb13ebe659cf8609206a45d2dcb
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.761181 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv"]
Nov 29 07:36:05 crc kubenswrapper[4943]: I1129 07:36:05.769957 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv" event={"ID":"dfa047b7-dae4-49f4-9cfd-5e9492f3620f","Type":"ContainerStarted","Data":"bf2622a99ce8e27be08af0d3f9a8e982e10d4cb13ebe659cf8609206a45d2dcb"}
Nov 29 07:36:06 crc kubenswrapper[4943]: I1129 07:36:06.779548 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv" event={"ID":"dfa047b7-dae4-49f4-9cfd-5e9492f3620f","Type":"ContainerStarted","Data":"56d99d12c7050556da8e97ec5e30c176e552d4dc9e36105fee79994718b9df2c"}
Nov 29 07:36:06 crc kubenswrapper[4943]: I1129 07:36:06.808789 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv" podStartSLOduration=2.337607352 podStartE2EDuration="2.808772855s" podCreationTimestamp="2025-11-29 07:36:04 +0000 UTC" firstStartedPulling="2025-11-29 07:36:05.756234469 +0000 UTC m=+3740.686323222" lastFinishedPulling="2025-11-29 07:36:06.227399982 +0000 UTC m=+3741.157488725" observedRunningTime="2025-11-29 07:36:06.803368463 +0000 UTC m=+3741.733457226" watchObservedRunningTime="2025-11-29 07:36:06.808772855 +0000 UTC m=+3741.738861608"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.104216 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p2pqh"]
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.107275 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.117920 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2pqh"]
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.288641 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-utilities\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.288725 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-catalog-content\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.288954 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsjbf\" (UniqueName: \"kubernetes.io/projected/cc711866-fee1-4df8-91b3-ef2158044f94-kube-api-access-jsjbf\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.390335 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsjbf\" (UniqueName: \"kubernetes.io/projected/cc711866-fee1-4df8-91b3-ef2158044f94-kube-api-access-jsjbf\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.390682 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-utilities\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.390825 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-catalog-content\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.391320 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-utilities\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.391350 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-catalog-content\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh"
Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.412932 4943 operation_generator.go:637] "MountVolume.SetUp
succeeded for volume \"kube-api-access-jsjbf\" (UniqueName: \"kubernetes.io/projected/cc711866-fee1-4df8-91b3-ef2158044f94-kube-api-access-jsjbf\") pod \"redhat-marketplace-p2pqh\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " pod="openshift-marketplace/redhat-marketplace-p2pqh" Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.425158 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2pqh" Nov 29 07:36:33 crc kubenswrapper[4943]: I1129 07:36:33.889212 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2pqh"] Nov 29 07:36:34 crc kubenswrapper[4943]: I1129 07:36:34.002670 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2pqh" event={"ID":"cc711866-fee1-4df8-91b3-ef2158044f94","Type":"ContainerStarted","Data":"303bd30719ed0e4b7a99eb4bcdbf70d5d3719b97c0c08fe0b50e3d35717524fc"} Nov 29 07:36:35 crc kubenswrapper[4943]: I1129 07:36:35.012486 4943 generic.go:334] "Generic (PLEG): container finished" podID="cc711866-fee1-4df8-91b3-ef2158044f94" containerID="a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9" exitCode=0 Nov 29 07:36:35 crc kubenswrapper[4943]: I1129 07:36:35.012576 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2pqh" event={"ID":"cc711866-fee1-4df8-91b3-ef2158044f94","Type":"ContainerDied","Data":"a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9"} Nov 29 07:36:35 crc kubenswrapper[4943]: I1129 07:36:35.014574 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:36:37 crc kubenswrapper[4943]: I1129 07:36:37.033231 4943 generic.go:334] "Generic (PLEG): container finished" podID="cc711866-fee1-4df8-91b3-ef2158044f94" containerID="a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e" exitCode=0 Nov 29 07:36:37 crc kubenswrapper[4943]: I1129 07:36:37.033281 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2pqh" event={"ID":"cc711866-fee1-4df8-91b3-ef2158044f94","Type":"ContainerDied","Data":"a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e"} Nov 29 07:36:39 crc kubenswrapper[4943]: I1129 07:36:39.055586 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2pqh" event={"ID":"cc711866-fee1-4df8-91b3-ef2158044f94","Type":"ContainerStarted","Data":"a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c"} Nov 29 07:36:39 crc kubenswrapper[4943]: I1129 07:36:39.086925 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p2pqh" podStartSLOduration=3.407851825 podStartE2EDuration="6.086907659s" podCreationTimestamp="2025-11-29 07:36:33 +0000 UTC" firstStartedPulling="2025-11-29 07:36:35.014381926 +0000 UTC m=+3769.944470679" lastFinishedPulling="2025-11-29 07:36:37.69343776 +0000 UTC m=+3772.623526513" observedRunningTime="2025-11-29 07:36:39.078711239 +0000 UTC m=+3774.008799992" watchObservedRunningTime="2025-11-29 07:36:39.086907659 +0000 UTC m=+3774.016996412" Nov 29 07:36:43 crc kubenswrapper[4943]: I1129 07:36:43.425465 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p2pqh" Nov 29 07:36:43 crc kubenswrapper[4943]: I1129 07:36:43.427123 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p2pqh" Nov 29 07:36:43 crc kubenswrapper[4943]: I1129 07:36:43.512407 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p2pqh" Nov 29 07:36:44 crc kubenswrapper[4943]: I1129 07:36:44.197371 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p2pqh" Nov 29 07:36:44 crc kubenswrapper[4943]: I1129 07:36:44.280408 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2pqh"] Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.133758 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p2pqh" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" containerName="registry-server" containerID="cri-o://a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c" gracePeriod=2 Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.706614 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2pqh" Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.766259 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-utilities\") pod \"cc711866-fee1-4df8-91b3-ef2158044f94\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.766315 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsjbf\" (UniqueName: \"kubernetes.io/projected/cc711866-fee1-4df8-91b3-ef2158044f94-kube-api-access-jsjbf\") pod \"cc711866-fee1-4df8-91b3-ef2158044f94\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.766533 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-catalog-content\") pod \"cc711866-fee1-4df8-91b3-ef2158044f94\" (UID: \"cc711866-fee1-4df8-91b3-ef2158044f94\") " Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.769724 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-utilities" (OuterVolumeSpecName: "utilities") pod "cc711866-fee1-4df8-91b3-ef2158044f94" (UID: "cc711866-fee1-4df8-91b3-ef2158044f94"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.788831 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc711866-fee1-4df8-91b3-ef2158044f94-kube-api-access-jsjbf" (OuterVolumeSpecName: "kube-api-access-jsjbf") pod "cc711866-fee1-4df8-91b3-ef2158044f94" (UID: "cc711866-fee1-4df8-91b3-ef2158044f94"). InnerVolumeSpecName "kube-api-access-jsjbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.791271 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc711866-fee1-4df8-91b3-ef2158044f94" (UID: "cc711866-fee1-4df8-91b3-ef2158044f94"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.869617 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.869668 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc711866-fee1-4df8-91b3-ef2158044f94-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:36:46 crc kubenswrapper[4943]: I1129 07:36:46.869678 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsjbf\" (UniqueName: \"kubernetes.io/projected/cc711866-fee1-4df8-91b3-ef2158044f94-kube-api-access-jsjbf\") on node \"crc\" DevicePath \"\"" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.151056 4943 generic.go:334] "Generic (PLEG): container finished" podID="cc711866-fee1-4df8-91b3-ef2158044f94" containerID="a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c" exitCode=0 Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.151136 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2pqh" event={"ID":"cc711866-fee1-4df8-91b3-ef2158044f94","Type":"ContainerDied","Data":"a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c"} Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.151529 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2pqh" event={"ID":"cc711866-fee1-4df8-91b3-ef2158044f94","Type":"ContainerDied","Data":"303bd30719ed0e4b7a99eb4bcdbf70d5d3719b97c0c08fe0b50e3d35717524fc"} Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.151589 4943 scope.go:117] "RemoveContainer" containerID="a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.151165 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2pqh" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.303384 4943 scope.go:117] "RemoveContainer" containerID="a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.349399 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2pqh"] Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.354179 4943 scope.go:117] "RemoveContainer" containerID="a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.355733 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2pqh"] Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.381771 4943 scope.go:117] "RemoveContainer" containerID="a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c" Nov 29 07:36:47 crc kubenswrapper[4943]: E1129 07:36:47.382239 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c\": container with ID starting with a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c not found: ID does not exist" containerID="a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.382333 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c"} err="failed to get container status \"a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c\": rpc error: code = NotFound desc = could not find container \"a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c\": container with ID starting with a9725083fea7a8ee176a3738da3108517fe659dbc0860e769547cd87d378173c not found: ID does not exist" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.382414 4943 scope.go:117] "RemoveContainer" containerID="a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e" Nov 29 07:36:47 crc kubenswrapper[4943]: E1129 07:36:47.382808 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e\": container with ID starting with a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e not found: ID does not exist" containerID="a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.382897 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e"} err="failed to get container status \"a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e\": rpc error: code = NotFound desc = could not find container \"a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e\": container with ID starting with a746c72ed04c6c25cdd4833d29ebc5bc85e16e00a339d2327b5643360852258e not found: ID does not exist" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.382966 4943 scope.go:117] "RemoveContainer" containerID="a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9" Nov 29 07:36:47 crc kubenswrapper[4943]: E1129 07:36:47.383297 4943 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9\": container with ID starting with a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9 not found: ID does not exist" containerID="a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9" Nov 29 07:36:47 crc kubenswrapper[4943]: I1129 07:36:47.383405 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9"} err="failed to get container status \"a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9\": rpc error: code = NotFound desc = could not find container \"a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9\": container with ID starting with a1cfc3bf49285e7631378f09b982fda87f877ac48a507963430dc9e5ff4bfbd9 not found: ID does not exist" Nov 29 07:36:49 crc kubenswrapper[4943]: I1129 07:36:49.346785 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" path="/var/lib/kubelet/pods/cc711866-fee1-4df8-91b3-ef2158044f94/volumes" Nov 29 07:36:50 crc kubenswrapper[4943]: I1129 07:36:50.180858 4943 generic.go:334] "Generic (PLEG): container finished" podID="dfa047b7-dae4-49f4-9cfd-5e9492f3620f" containerID="56d99d12c7050556da8e97ec5e30c176e552d4dc9e36105fee79994718b9df2c" exitCode=0 Nov 29 07:36:50 crc kubenswrapper[4943]: I1129 07:36:50.180922 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv" event={"ID":"dfa047b7-dae4-49f4-9cfd-5e9492f3620f","Type":"ContainerDied","Data":"56d99d12c7050556da8e97ec5e30c176e552d4dc9e36105fee79994718b9df2c"} Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.644070 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv" Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.787066 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ssh-key\") pod \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.787145 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-inventory\") pod \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.787385 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ceph\") pod \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.787465 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zscx8\" (UniqueName: \"kubernetes.io/projected/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-kube-api-access-zscx8\") pod \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\" (UID: \"dfa047b7-dae4-49f4-9cfd-5e9492f3620f\") " Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.798817 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-kube-api-access-zscx8" (OuterVolumeSpecName: "kube-api-access-zscx8") pod "dfa047b7-dae4-49f4-9cfd-5e9492f3620f" (UID: "dfa047b7-dae4-49f4-9cfd-5e9492f3620f"). InnerVolumeSpecName "kube-api-access-zscx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.800698 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ceph" (OuterVolumeSpecName: "ceph") pod "dfa047b7-dae4-49f4-9cfd-5e9492f3620f" (UID: "dfa047b7-dae4-49f4-9cfd-5e9492f3620f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.842858 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "dfa047b7-dae4-49f4-9cfd-5e9492f3620f" (UID: "dfa047b7-dae4-49f4-9cfd-5e9492f3620f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.846814 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-inventory" (OuterVolumeSpecName: "inventory") pod "dfa047b7-dae4-49f4-9cfd-5e9492f3620f" (UID: "dfa047b7-dae4-49f4-9cfd-5e9492f3620f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.892752 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zscx8\" (UniqueName: \"kubernetes.io/projected/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-kube-api-access-zscx8\") on node \"crc\" DevicePath \"\"" Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.892783 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.892795 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:36:51 crc kubenswrapper[4943]: I1129 07:36:51.892803 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dfa047b7-dae4-49f4-9cfd-5e9492f3620f-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.205687 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv" event={"ID":"dfa047b7-dae4-49f4-9cfd-5e9492f3620f","Type":"ContainerDied","Data":"bf2622a99ce8e27be08af0d3f9a8e982e10d4cb13ebe659cf8609206a45d2dcb"} Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.206181 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf2622a99ce8e27be08af0d3f9a8e982e10d4cb13ebe659cf8609206a45d2dcb" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.205802 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-q7flv" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.302622 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l"] Nov 29 07:36:52 crc kubenswrapper[4943]: E1129 07:36:52.303397 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" containerName="extract-content" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.303421 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" containerName="extract-content" Nov 29 07:36:52 crc kubenswrapper[4943]: E1129 07:36:52.303447 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" containerName="registry-server" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.303455 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" containerName="registry-server" Nov 29 07:36:52 crc kubenswrapper[4943]: E1129 07:36:52.303472 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" containerName="extract-utilities" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.303480 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" containerName="extract-utilities" Nov 29 07:36:52 crc kubenswrapper[4943]: E1129 07:36:52.303498 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfa047b7-dae4-49f4-9cfd-5e9492f3620f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.303507 4943 
state_mem.go:107] "Deleted CPUSet assignment" podUID="dfa047b7-dae4-49f4-9cfd-5e9492f3620f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.303950 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc711866-fee1-4df8-91b3-ef2158044f94" containerName="registry-server" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.303974 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfa047b7-dae4-49f4-9cfd-5e9492f3620f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.304839 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.308105 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.308920 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.309084 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.309843 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.310013 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.313904 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l"] Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.501969 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.502043 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m59bb\" (UniqueName: \"kubernetes.io/projected/5d32eb7f-e003-4ace-be60-a38ea0701477-kube-api-access-m59bb\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.502086 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.502146 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" 
(UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.604217 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.604526 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m59bb\" (UniqueName: \"kubernetes.io/projected/5d32eb7f-e003-4ace-be60-a38ea0701477-kube-api-access-m59bb\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.604711 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.604854 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.610518 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.611168 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.616524 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 crc kubenswrapper[4943]: I1129 07:36:52.623482 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m59bb\" (UniqueName: \"kubernetes.io/projected/5d32eb7f-e003-4ace-be60-a38ea0701477-kube-api-access-m59bb\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" Nov 29 07:36:52 
Nov 29 07:36:53 crc kubenswrapper[4943]: I1129 07:36:53.452172 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l"]
Nov 29 07:36:54 crc kubenswrapper[4943]: I1129 07:36:54.226174 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" event={"ID":"5d32eb7f-e003-4ace-be60-a38ea0701477","Type":"ContainerStarted","Data":"9be418aa1d601574f7657c6516feb7eb89531c12aa9c68c2d482ded971641f59"}
Nov 29 07:36:54 crc kubenswrapper[4943]: I1129 07:36:54.226513 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" event={"ID":"5d32eb7f-e003-4ace-be60-a38ea0701477","Type":"ContainerStarted","Data":"5646565f5b27047fdb6f1434ae774fa1c03b6b18932c51b066ba81da032005cd"}
Nov 29 07:36:54 crc kubenswrapper[4943]: I1129 07:36:54.242702 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" podStartSLOduration=1.7671050990000001 podStartE2EDuration="2.24268429s" podCreationTimestamp="2025-11-29 07:36:52 +0000 UTC" firstStartedPulling="2025-11-29 07:36:53.457879343 +0000 UTC m=+3788.387968106" lastFinishedPulling="2025-11-29 07:36:53.933458524 +0000 UTC m=+3788.863547297" observedRunningTime="2025-11-29 07:36:54.240135267 +0000 UTC m=+3789.170224030" watchObservedRunningTime="2025-11-29 07:36:54.24268429 +0000 UTC m=+3789.172773043"
Nov 29 07:36:58 crc kubenswrapper[4943]: I1129 07:36:58.270505 4943 generic.go:334] "Generic (PLEG): container finished" podID="5d32eb7f-e003-4ace-be60-a38ea0701477" containerID="9be418aa1d601574f7657c6516feb7eb89531c12aa9c68c2d482ded971641f59" exitCode=0
Nov 29 07:36:58 crc kubenswrapper[4943]: I1129 07:36:58.270625 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" event={"ID":"5d32eb7f-e003-4ace-be60-a38ea0701477","Type":"ContainerDied","Data":"9be418aa1d601574f7657c6516feb7eb89531c12aa9c68c2d482ded971641f59"}
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.845164 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l"
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.939110 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-inventory\") pod \"5d32eb7f-e003-4ace-be60-a38ea0701477\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") "
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.939193 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ceph\") pod \"5d32eb7f-e003-4ace-be60-a38ea0701477\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") "
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.939245 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ssh-key\") pod \"5d32eb7f-e003-4ace-be60-a38ea0701477\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") "
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.939273 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m59bb\" (UniqueName: \"kubernetes.io/projected/5d32eb7f-e003-4ace-be60-a38ea0701477-kube-api-access-m59bb\") pod \"5d32eb7f-e003-4ace-be60-a38ea0701477\" (UID: \"5d32eb7f-e003-4ace-be60-a38ea0701477\") "
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.955781 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ceph" (OuterVolumeSpecName: "ceph") pod "5d32eb7f-e003-4ace-be60-a38ea0701477" (UID: "5d32eb7f-e003-4ace-be60-a38ea0701477"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.955802 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d32eb7f-e003-4ace-be60-a38ea0701477-kube-api-access-m59bb" (OuterVolumeSpecName: "kube-api-access-m59bb") pod "5d32eb7f-e003-4ace-be60-a38ea0701477" (UID: "5d32eb7f-e003-4ace-be60-a38ea0701477"). InnerVolumeSpecName "kube-api-access-m59bb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.965675 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-inventory" (OuterVolumeSpecName: "inventory") pod "5d32eb7f-e003-4ace-be60-a38ea0701477" (UID: "5d32eb7f-e003-4ace-be60-a38ea0701477"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:36:59 crc kubenswrapper[4943]: I1129 07:36:59.977746 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5d32eb7f-e003-4ace-be60-a38ea0701477" (UID: "5d32eb7f-e003-4ace-be60-a38ea0701477"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.040727 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m59bb\" (UniqueName: \"kubernetes.io/projected/5d32eb7f-e003-4ace-be60-a38ea0701477-kube-api-access-m59bb\") on node \"crc\" DevicePath \"\""
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.041001 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.041011 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ceph\") on node \"crc\" DevicePath \"\""
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.041020 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d32eb7f-e003-4ace-be60-a38ea0701477-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.288460 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l" event={"ID":"5d32eb7f-e003-4ace-be60-a38ea0701477","Type":"ContainerDied","Data":"5646565f5b27047fdb6f1434ae774fa1c03b6b18932c51b066ba81da032005cd"}
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.288504 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5646565f5b27047fdb6f1434ae774fa1c03b6b18932c51b066ba81da032005cd"
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.288586 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l"
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.387741 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b"]
Nov 29 07:37:00 crc kubenswrapper[4943]: E1129 07:37:00.388240 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d32eb7f-e003-4ace-be60-a38ea0701477" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.388274 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d32eb7f-e003-4ace-be60-a38ea0701477" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.388549 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d32eb7f-e003-4ace-be60-a38ea0701477" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.389489 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b"
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.391470 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.391868 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.392590 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.392613 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.396181 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.415516 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b"] Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.552669 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.552720 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.552767 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.552835 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c25gr\" (UniqueName: \"kubernetes.io/projected/fa69db4d-9f15-4890-bf12-437377c8f3a7-kube-api-access-c25gr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.654783 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.654873 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.654934 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.655017 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c25gr\" (UniqueName: \"kubernetes.io/projected/fa69db4d-9f15-4890-bf12-437377c8f3a7-kube-api-access-c25gr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.660645 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.661684 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.662775 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.675196 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c25gr\" (UniqueName: \"kubernetes.io/projected/fa69db4d-9f15-4890-bf12-437377c8f3a7-kube-api-access-c25gr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:00 crc kubenswrapper[4943]: I1129 07:37:00.741317 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:01 crc kubenswrapper[4943]: I1129 07:37:01.297747 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b"] Nov 29 07:37:02 crc kubenswrapper[4943]: I1129 07:37:02.308794 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" event={"ID":"fa69db4d-9f15-4890-bf12-437377c8f3a7","Type":"ContainerStarted","Data":"b9a1057183cca9a3926468b23ac1366a3d0b713012818fb74140f44c5987d715"} Nov 29 07:37:02 crc kubenswrapper[4943]: I1129 07:37:02.612904 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:37:02 crc kubenswrapper[4943]: I1129 07:37:02.612972 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:37:03 crc kubenswrapper[4943]: I1129 07:37:03.316392 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" event={"ID":"fa69db4d-9f15-4890-bf12-437377c8f3a7","Type":"ContainerStarted","Data":"91ab2b9e6fa38a50f0959ad675464da19fe2b3a3afba91451a2406dce7b9941e"} Nov 29 07:37:03 crc kubenswrapper[4943]: I1129 07:37:03.333471 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" podStartSLOduration=2.8895582319999997 podStartE2EDuration="3.333455676s" podCreationTimestamp="2025-11-29 07:37:00 +0000 UTC" firstStartedPulling="2025-11-29 07:37:01.738803888 +0000 UTC m=+3796.668892641" lastFinishedPulling="2025-11-29 07:37:02.182701292 +0000 UTC m=+3797.112790085" observedRunningTime="2025-11-29 07:37:03.331061607 +0000 UTC m=+3798.261150370" watchObservedRunningTime="2025-11-29 07:37:03.333455676 +0000 UTC m=+3798.263544429" Nov 29 07:37:32 crc kubenswrapper[4943]: I1129 07:37:32.613512 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:37:32 crc kubenswrapper[4943]: I1129 07:37:32.614012 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:37:52 crc kubenswrapper[4943]: I1129 07:37:52.773455 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" event={"ID":"fa69db4d-9f15-4890-bf12-437377c8f3a7","Type":"ContainerDied","Data":"91ab2b9e6fa38a50f0959ad675464da19fe2b3a3afba91451a2406dce7b9941e"} Nov 29 07:37:52 crc kubenswrapper[4943]: I1129 07:37:52.773469 4943 generic.go:334] "Generic 
(PLEG): container finished" podID="fa69db4d-9f15-4890-bf12-437377c8f3a7" containerID="91ab2b9e6fa38a50f0959ad675464da19fe2b3a3afba91451a2406dce7b9941e" exitCode=0 Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.214451 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.293281 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-inventory\") pod \"fa69db4d-9f15-4890-bf12-437377c8f3a7\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.293433 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c25gr\" (UniqueName: \"kubernetes.io/projected/fa69db4d-9f15-4890-bf12-437377c8f3a7-kube-api-access-c25gr\") pod \"fa69db4d-9f15-4890-bf12-437377c8f3a7\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.293471 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ceph\") pod \"fa69db4d-9f15-4890-bf12-437377c8f3a7\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.293538 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ssh-key\") pod \"fa69db4d-9f15-4890-bf12-437377c8f3a7\" (UID: \"fa69db4d-9f15-4890-bf12-437377c8f3a7\") " Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.299158 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ceph" (OuterVolumeSpecName: "ceph") pod "fa69db4d-9f15-4890-bf12-437377c8f3a7" (UID: "fa69db4d-9f15-4890-bf12-437377c8f3a7"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.299915 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa69db4d-9f15-4890-bf12-437377c8f3a7-kube-api-access-c25gr" (OuterVolumeSpecName: "kube-api-access-c25gr") pod "fa69db4d-9f15-4890-bf12-437377c8f3a7" (UID: "fa69db4d-9f15-4890-bf12-437377c8f3a7"). InnerVolumeSpecName "kube-api-access-c25gr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.322619 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fa69db4d-9f15-4890-bf12-437377c8f3a7" (UID: "fa69db4d-9f15-4890-bf12-437377c8f3a7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.329189 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-inventory" (OuterVolumeSpecName: "inventory") pod "fa69db4d-9f15-4890-bf12-437377c8f3a7" (UID: "fa69db4d-9f15-4890-bf12-437377c8f3a7"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.395269 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.395303 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.395312 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c25gr\" (UniqueName: \"kubernetes.io/projected/fa69db4d-9f15-4890-bf12-437377c8f3a7-kube-api-access-c25gr\") on node \"crc\" DevicePath \"\"" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.395322 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fa69db4d-9f15-4890-bf12-437377c8f3a7-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.796396 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" event={"ID":"fa69db4d-9f15-4890-bf12-437377c8f3a7","Type":"ContainerDied","Data":"b9a1057183cca9a3926468b23ac1366a3d0b713012818fb74140f44c5987d715"} Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.796542 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.797796 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9a1057183cca9a3926468b23ac1366a3d0b713012818fb74140f44c5987d715" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.895338 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xjccs"] Nov 29 07:37:54 crc kubenswrapper[4943]: E1129 07:37:54.895908 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa69db4d-9f15-4890-bf12-437377c8f3a7" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.895928 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa69db4d-9f15-4890-bf12-437377c8f3a7" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.896147 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa69db4d-9f15-4890-bf12-437377c8f3a7" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.896866 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.898771 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.899058 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.899064 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.899532 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.899830 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.902437 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xjccs"] Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.906641 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ceph\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.906716 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ccks\" (UniqueName: \"kubernetes.io/projected/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-kube-api-access-5ccks\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.906816 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:54 crc kubenswrapper[4943]: I1129 07:37:54.906866 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.008822 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.008955 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ssh-key-openstack-edpm-ipam\") pod 
\"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.009012 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ceph\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.009114 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ccks\" (UniqueName: \"kubernetes.io/projected/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-kube-api-access-5ccks\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.014316 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.014464 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ceph\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.015241 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.030467 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ccks\" (UniqueName: \"kubernetes.io/projected/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-kube-api-access-5ccks\") pod \"ssh-known-hosts-edpm-deployment-xjccs\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.221355 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.778475 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-xjccs"] Nov 29 07:37:55 crc kubenswrapper[4943]: I1129 07:37:55.804467 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" event={"ID":"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43","Type":"ContainerStarted","Data":"36a053b9fa99c5a1eb60d6192b8f8ae858c71f2545e67f2764d0d30aefb780e1"} Nov 29 07:37:56 crc kubenswrapper[4943]: I1129 07:37:56.813810 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" event={"ID":"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43","Type":"ContainerStarted","Data":"de87e2f05df3900f971d3691adec9dc85fe4c6bdf10edb37ecde6d7361199cb7"} Nov 29 07:37:56 crc kubenswrapper[4943]: I1129 07:37:56.835815 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" podStartSLOduration=2.256501449 podStartE2EDuration="2.835798972s" podCreationTimestamp="2025-11-29 07:37:54 +0000 UTC" firstStartedPulling="2025-11-29 07:37:55.786096875 +0000 UTC m=+3850.716185628" lastFinishedPulling="2025-11-29 07:37:56.365394388 +0000 UTC m=+3851.295483151" observedRunningTime="2025-11-29 07:37:56.827920979 +0000 UTC m=+3851.758009732" watchObservedRunningTime="2025-11-29 07:37:56.835798972 +0000 UTC m=+3851.765887715" Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.613092 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.613547 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.613599 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.614227 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.614277 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" gracePeriod=600 Nov 29 07:38:02 crc kubenswrapper[4943]: E1129 07:38:02.748895 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.869439 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" exitCode=0 Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.869513 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"} Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.869590 4943 scope.go:117] "RemoveContainer" containerID="5f513ae3d5801e75b5010f5b7e9d196a35e94b2d26664a48e9b728eeeb94c65e" Nov 29 07:38:02 crc kubenswrapper[4943]: I1129 07:38:02.870432 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:38:02 crc kubenswrapper[4943]: E1129 07:38:02.870830 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:38:07 crc kubenswrapper[4943]: I1129 07:38:07.967406 4943 generic.go:334] "Generic (PLEG): container finished" podID="d72cb85c-1d10-4baf-bdfb-ff2c9f927f43" containerID="de87e2f05df3900f971d3691adec9dc85fe4c6bdf10edb37ecde6d7361199cb7" exitCode=0 Nov 29 07:38:07 crc kubenswrapper[4943]: I1129 07:38:07.967505 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" event={"ID":"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43","Type":"ContainerDied","Data":"de87e2f05df3900f971d3691adec9dc85fe4c6bdf10edb37ecde6d7361199cb7"} Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.449018 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.578232 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ccks\" (UniqueName: \"kubernetes.io/projected/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-kube-api-access-5ccks\") pod \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.578333 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ceph\") pod \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.578382 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ssh-key-openstack-edpm-ipam\") pod \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.578496 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-inventory-0\") pod \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\" (UID: \"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43\") " Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.585617 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-kube-api-access-5ccks" (OuterVolumeSpecName: "kube-api-access-5ccks") pod "d72cb85c-1d10-4baf-bdfb-ff2c9f927f43" (UID: "d72cb85c-1d10-4baf-bdfb-ff2c9f927f43"). InnerVolumeSpecName "kube-api-access-5ccks". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.589821 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ceph" (OuterVolumeSpecName: "ceph") pod "d72cb85c-1d10-4baf-bdfb-ff2c9f927f43" (UID: "d72cb85c-1d10-4baf-bdfb-ff2c9f927f43"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.643174 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "d72cb85c-1d10-4baf-bdfb-ff2c9f927f43" (UID: "d72cb85c-1d10-4baf-bdfb-ff2c9f927f43"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.649382 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d72cb85c-1d10-4baf-bdfb-ff2c9f927f43" (UID: "d72cb85c-1d10-4baf-bdfb-ff2c9f927f43"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.681214 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ccks\" (UniqueName: \"kubernetes.io/projected/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-kube-api-access-5ccks\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.681308 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.681359 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.681373 4943 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d72cb85c-1d10-4baf-bdfb-ff2c9f927f43-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.988885 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" event={"ID":"d72cb85c-1d10-4baf-bdfb-ff2c9f927f43","Type":"ContainerDied","Data":"36a053b9fa99c5a1eb60d6192b8f8ae858c71f2545e67f2764d0d30aefb780e1"} Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.988936 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-xjccs" Nov 29 07:38:09 crc kubenswrapper[4943]: I1129 07:38:09.989650 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36a053b9fa99c5a1eb60d6192b8f8ae858c71f2545e67f2764d0d30aefb780e1" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.080419 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z"] Nov 29 07:38:10 crc kubenswrapper[4943]: E1129 07:38:10.080777 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d72cb85c-1d10-4baf-bdfb-ff2c9f927f43" containerName="ssh-known-hosts-edpm-deployment" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.080791 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d72cb85c-1d10-4baf-bdfb-ff2c9f927f43" containerName="ssh-known-hosts-edpm-deployment" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.080974 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d72cb85c-1d10-4baf-bdfb-ff2c9f927f43" containerName="ssh-known-hosts-edpm-deployment" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.081541 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.083484 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.084007 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.084122 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.084189 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.084306 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.100281 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z"] Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.188616 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.188703 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.188881 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4prn\" (UniqueName: \"kubernetes.io/projected/1f3aaa0c-0442-4417-8963-ad0640858384-kube-api-access-s4prn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.189110 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.290716 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.290831 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.290907 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4prn\" (UniqueName: \"kubernetes.io/projected/1f3aaa0c-0442-4417-8963-ad0640858384-kube-api-access-s4prn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.291038 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.296987 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.297390 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.297419 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.320139 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4prn\" (UniqueName: \"kubernetes.io/projected/1f3aaa0c-0442-4417-8963-ad0640858384-kube-api-access-s4prn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-jcp4z\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:10 crc kubenswrapper[4943]: I1129 07:38:10.396957 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:11 crc kubenswrapper[4943]: I1129 07:38:11.153148 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z"] Nov 29 07:38:11 crc kubenswrapper[4943]: W1129 07:38:11.164820 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f3aaa0c_0442_4417_8963_ad0640858384.slice/crio-727cb5e91829e1bff07bb6cbe09b8c5a0903b3d91a9846221f49623d9882ae3c WatchSource:0}: Error finding container 727cb5e91829e1bff07bb6cbe09b8c5a0903b3d91a9846221f49623d9882ae3c: Status 404 returned error can't find the container with id 727cb5e91829e1bff07bb6cbe09b8c5a0903b3d91a9846221f49623d9882ae3c Nov 29 07:38:12 crc kubenswrapper[4943]: I1129 07:38:12.007420 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" event={"ID":"1f3aaa0c-0442-4417-8963-ad0640858384","Type":"ContainerStarted","Data":"727cb5e91829e1bff07bb6cbe09b8c5a0903b3d91a9846221f49623d9882ae3c"} Nov 29 07:38:13 crc kubenswrapper[4943]: I1129 07:38:13.020505 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" event={"ID":"1f3aaa0c-0442-4417-8963-ad0640858384","Type":"ContainerStarted","Data":"f35b7a59aafcb0e98ecd9673446fc7e73242b419ec02002e14f1897062b85085"} Nov 29 07:38:16 crc kubenswrapper[4943]: I1129 07:38:16.327243 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:38:16 crc kubenswrapper[4943]: E1129 07:38:16.327892 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:38:21 crc kubenswrapper[4943]: I1129 07:38:21.101101 4943 generic.go:334] "Generic (PLEG): container finished" podID="1f3aaa0c-0442-4417-8963-ad0640858384" containerID="f35b7a59aafcb0e98ecd9673446fc7e73242b419ec02002e14f1897062b85085" exitCode=0 Nov 29 07:38:21 crc kubenswrapper[4943]: I1129 07:38:21.101639 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" event={"ID":"1f3aaa0c-0442-4417-8963-ad0640858384","Type":"ContainerDied","Data":"f35b7a59aafcb0e98ecd9673446fc7e73242b419ec02002e14f1897062b85085"} Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.754317 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.850799 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4prn\" (UniqueName: \"kubernetes.io/projected/1f3aaa0c-0442-4417-8963-ad0640858384-kube-api-access-s4prn\") pod \"1f3aaa0c-0442-4417-8963-ad0640858384\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.851669 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ceph\") pod \"1f3aaa0c-0442-4417-8963-ad0640858384\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.851866 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ssh-key\") pod \"1f3aaa0c-0442-4417-8963-ad0640858384\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.851927 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-inventory\") pod \"1f3aaa0c-0442-4417-8963-ad0640858384\" (UID: \"1f3aaa0c-0442-4417-8963-ad0640858384\") " Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.864450 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ceph" (OuterVolumeSpecName: "ceph") pod "1f3aaa0c-0442-4417-8963-ad0640858384" (UID: "1f3aaa0c-0442-4417-8963-ad0640858384"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.864902 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f3aaa0c-0442-4417-8963-ad0640858384-kube-api-access-s4prn" (OuterVolumeSpecName: "kube-api-access-s4prn") pod "1f3aaa0c-0442-4417-8963-ad0640858384" (UID: "1f3aaa0c-0442-4417-8963-ad0640858384"). InnerVolumeSpecName "kube-api-access-s4prn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.888694 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-inventory" (OuterVolumeSpecName: "inventory") pod "1f3aaa0c-0442-4417-8963-ad0640858384" (UID: "1f3aaa0c-0442-4417-8963-ad0640858384"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.906438 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1f3aaa0c-0442-4417-8963-ad0640858384" (UID: "1f3aaa0c-0442-4417-8963-ad0640858384"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.957299 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.957331 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.957340 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4prn\" (UniqueName: \"kubernetes.io/projected/1f3aaa0c-0442-4417-8963-ad0640858384-kube-api-access-s4prn\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:22 crc kubenswrapper[4943]: I1129 07:38:22.957349 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1f3aaa0c-0442-4417-8963-ad0640858384-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.119628 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" event={"ID":"1f3aaa0c-0442-4417-8963-ad0640858384","Type":"ContainerDied","Data":"727cb5e91829e1bff07bb6cbe09b8c5a0903b3d91a9846221f49623d9882ae3c"} Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.119667 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="727cb5e91829e1bff07bb6cbe09b8c5a0903b3d91a9846221f49623d9882ae3c" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.119733 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-jcp4z" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.176913 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v"] Nov 29 07:38:23 crc kubenswrapper[4943]: E1129 07:38:23.177244 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f3aaa0c-0442-4417-8963-ad0640858384" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.177263 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f3aaa0c-0442-4417-8963-ad0640858384" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.177420 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f3aaa0c-0442-4417-8963-ad0640858384" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.177997 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.180530 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.180651 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.180773 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.180937 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.181819 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.197907 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v"] Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.262014 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.262205 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.262457 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82g5j\" (UniqueName: \"kubernetes.io/projected/988e60e7-eeec-4335-8f34-8a0f7228dd59-kube-api-access-82g5j\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.262499 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.363641 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82g5j\" (UniqueName: \"kubernetes.io/projected/988e60e7-eeec-4335-8f34-8a0f7228dd59-kube-api-access-82g5j\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.363783 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.363867 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.363900 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.368641 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.381726 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.383081 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.390432 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82g5j\" (UniqueName: \"kubernetes.io/projected/988e60e7-eeec-4335-8f34-8a0f7228dd59-kube-api-access-82g5j\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:23 crc kubenswrapper[4943]: I1129 07:38:23.492310 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:24 crc kubenswrapper[4943]: I1129 07:38:24.286495 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v"] Nov 29 07:38:25 crc kubenswrapper[4943]: I1129 07:38:25.145813 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" event={"ID":"988e60e7-eeec-4335-8f34-8a0f7228dd59","Type":"ContainerStarted","Data":"ded5bfa3a57d3958697ea9115592d154af0de4fafd4dd74bd6f043a88a036801"} Nov 29 07:38:26 crc kubenswrapper[4943]: I1129 07:38:26.159240 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" event={"ID":"988e60e7-eeec-4335-8f34-8a0f7228dd59","Type":"ContainerStarted","Data":"dcfbb0f4b3c1c5c4174152313c2c44c3b8c39750128948cbf4d454ef47e2ac93"} Nov 29 07:38:26 crc kubenswrapper[4943]: I1129 07:38:26.186122 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" podStartSLOduration=2.58343118 podStartE2EDuration="3.186098484s" podCreationTimestamp="2025-11-29 07:38:23 +0000 UTC" firstStartedPulling="2025-11-29 07:38:24.286021424 +0000 UTC m=+3879.216110177" lastFinishedPulling="2025-11-29 07:38:24.888688738 +0000 UTC m=+3879.818777481" observedRunningTime="2025-11-29 07:38:26.181406119 +0000 UTC m=+3881.111494882" watchObservedRunningTime="2025-11-29 07:38:26.186098484 +0000 UTC m=+3881.116187247" Nov 29 07:38:30 crc kubenswrapper[4943]: I1129 07:38:30.328389 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:38:30 crc kubenswrapper[4943]: E1129 07:38:30.329151 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:38:37 crc kubenswrapper[4943]: I1129 07:38:37.259919 4943 generic.go:334] "Generic (PLEG): container finished" podID="988e60e7-eeec-4335-8f34-8a0f7228dd59" containerID="dcfbb0f4b3c1c5c4174152313c2c44c3b8c39750128948cbf4d454ef47e2ac93" exitCode=0 Nov 29 07:38:37 crc kubenswrapper[4943]: I1129 07:38:37.260032 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" event={"ID":"988e60e7-eeec-4335-8f34-8a0f7228dd59","Type":"ContainerDied","Data":"dcfbb0f4b3c1c5c4174152313c2c44c3b8c39750128948cbf4d454ef47e2ac93"} Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.228857 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.281207 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" event={"ID":"988e60e7-eeec-4335-8f34-8a0f7228dd59","Type":"ContainerDied","Data":"ded5bfa3a57d3958697ea9115592d154af0de4fafd4dd74bd6f043a88a036801"} Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.281312 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ded5bfa3a57d3958697ea9115592d154af0de4fafd4dd74bd6f043a88a036801" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.281270 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.297947 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-inventory\") pod \"988e60e7-eeec-4335-8f34-8a0f7228dd59\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.298069 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ssh-key\") pod \"988e60e7-eeec-4335-8f34-8a0f7228dd59\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.298095 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ceph\") pod \"988e60e7-eeec-4335-8f34-8a0f7228dd59\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.298770 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82g5j\" (UniqueName: \"kubernetes.io/projected/988e60e7-eeec-4335-8f34-8a0f7228dd59-kube-api-access-82g5j\") pod \"988e60e7-eeec-4335-8f34-8a0f7228dd59\" (UID: \"988e60e7-eeec-4335-8f34-8a0f7228dd59\") " Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.330940 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ceph" (OuterVolumeSpecName: "ceph") pod "988e60e7-eeec-4335-8f34-8a0f7228dd59" (UID: "988e60e7-eeec-4335-8f34-8a0f7228dd59"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.331529 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/988e60e7-eeec-4335-8f34-8a0f7228dd59-kube-api-access-82g5j" (OuterVolumeSpecName: "kube-api-access-82g5j") pod "988e60e7-eeec-4335-8f34-8a0f7228dd59" (UID: "988e60e7-eeec-4335-8f34-8a0f7228dd59"). InnerVolumeSpecName "kube-api-access-82g5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.337225 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "988e60e7-eeec-4335-8f34-8a0f7228dd59" (UID: "988e60e7-eeec-4335-8f34-8a0f7228dd59"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.339368 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-inventory" (OuterVolumeSpecName: "inventory") pod "988e60e7-eeec-4335-8f34-8a0f7228dd59" (UID: "988e60e7-eeec-4335-8f34-8a0f7228dd59"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.369169 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"] Nov 29 07:38:39 crc kubenswrapper[4943]: E1129 07:38:39.376140 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="988e60e7-eeec-4335-8f34-8a0f7228dd59" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.376181 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="988e60e7-eeec-4335-8f34-8a0f7228dd59" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.376402 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="988e60e7-eeec-4335-8f34-8a0f7228dd59" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.377107 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.381659 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.381817 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.381901 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.391594 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"] Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.401483 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.401517 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.401527 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82g5j\" (UniqueName: \"kubernetes.io/projected/988e60e7-eeec-4335-8f34-8a0f7228dd59-kube-api-access-82g5j\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.401536 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/988e60e7-eeec-4335-8f34-8a0f7228dd59-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.503792 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504227 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504269 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504341 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504426 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504492 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmrpw\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-kube-api-access-qmrpw\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504545 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504586 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ovn-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504711 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504790 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504844 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504889 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.504973 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.606749 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.607488 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" Nov 29 
07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.607620 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.607673 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.607702 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.607742 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.607799 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.607946 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.608005 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.608044 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.608131 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.608194 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.608266 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmrpw\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-kube-api-access-qmrpw\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.611582 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.613243 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.613790 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.613869 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.613981 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.614403 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.614722 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.614945 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.615056 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.615425 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.617989 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.621623 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.639786 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmrpw\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-kube-api-access-qmrpw\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gwj89\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
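The two-phase pattern above, "MountVolume started" from reconciler_common.go:218 followed by "MountVolume.SetUp succeeded" from operation_generator.go:637, is the kubelet's volume reconcile loop handing each volume to its plugin and confirming the mount. A minimal, hypothetical helper (not part of kubelet) that pairs the two phases by volume name when fed journal lines like these on stdin:

// pairmounts.go: report volumes whose "MountVolume started" entry never got
// a matching "MountVolume.SetUp succeeded" entry in a kubelet journal.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// The quotes inside the structured log message appear escaped (\") in the journal.
	started := regexp.MustCompile(`MountVolume started for volume \\"([^\\]+)\\"`)
	succeeded := regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"([^\\]+)\\"`)
	pending := map[string]bool{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // journal entries can be very long
	for sc.Scan() {
		if m := started.FindStringSubmatch(sc.Text()); m != nil {
			pending[m[1]] = true
		}
		if m := succeeded.FindStringSubmatch(sc.Text()); m != nil {
			delete(pending, m[1])
		}
	}
	for v := range pending {
		fmt.Printf("volume %q never reported SetUp success\n", v)
	}
}

Any volume still pending at end of input is a mount that never completed, which is typically what holds a pod in ContainerCreating.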
Nov 29 07:38:39 crc kubenswrapper[4943]: I1129 07:38:39.714753 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:38:40 crc kubenswrapper[4943]: I1129 07:38:40.294240 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"]
Nov 29 07:38:41 crc kubenswrapper[4943]: I1129 07:38:41.328907 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" event={"ID":"718531e1-453b-4e16-a497-d3af7c97b9ed","Type":"ContainerStarted","Data":"6d81dc0b90696450cf603188df4c8053c45694d284b730a3a17d0a472400cea0"}
Nov 29 07:38:42 crc kubenswrapper[4943]: I1129 07:38:42.341818 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" event={"ID":"718531e1-453b-4e16-a497-d3af7c97b9ed","Type":"ContainerStarted","Data":"6659aeef9b5ce305edea6c63fd7b4bfdb493889d8884ca1e14379abbe7f8f3b6"}
Nov 29 07:38:42 crc kubenswrapper[4943]: I1129 07:38:42.366136 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" podStartSLOduration=2.868939277 podStartE2EDuration="3.366106467s" podCreationTimestamp="2025-11-29 07:38:39 +0000 UTC" firstStartedPulling="2025-11-29 07:38:40.306944496 +0000 UTC m=+3895.237033259" lastFinishedPulling="2025-11-29 07:38:40.804111686 +0000 UTC m=+3895.734200449" observedRunningTime="2025-11-29 07:38:42.362462037 +0000 UTC m=+3897.292550810" watchObservedRunningTime="2025-11-29 07:38:42.366106467 +0000 UTC m=+3897.296195260"
Nov 29 07:38:43 crc kubenswrapper[4943]: I1129 07:38:43.328521 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:38:43 crc kubenswrapper[4943]: E1129 07:38:43.329302 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:38:55 crc kubenswrapper[4943]: I1129 07:38:55.334151 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:38:55 crc kubenswrapper[4943]: E1129 07:38:55.335406 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:39:06 crc kubenswrapper[4943]: I1129 07:39:06.327836 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:39:06 crc kubenswrapper[4943]: E1129 07:39:06.329016 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:39:17 crc kubenswrapper[4943]: I1129 07:39:17.327863 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:39:17 crc kubenswrapper[4943]: E1129 07:39:17.328890 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:39:23 crc kubenswrapper[4943]: I1129 07:39:23.748613 4943 generic.go:334] "Generic (PLEG): container finished" podID="718531e1-453b-4e16-a497-d3af7c97b9ed" containerID="6659aeef9b5ce305edea6c63fd7b4bfdb493889d8884ca1e14379abbe7f8f3b6" exitCode=0
Nov 29 07:39:23 crc kubenswrapper[4943]: I1129 07:39:23.748665 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" event={"ID":"718531e1-453b-4e16-a497-d3af7c97b9ed","Type":"ContainerDied","Data":"6659aeef9b5ce305edea6c63fd7b4bfdb493889d8884ca1e14379abbe7f8f3b6"}
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.219643 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.315924 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ceph\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.315997 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-ovn-default-certs-0\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316048 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ssh-key\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316078 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-inventory\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316150 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-libvirt-combined-ca-bundle\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316177 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-repo-setup-combined-ca-bundle\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316214 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmrpw\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-kube-api-access-qmrpw\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316253 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ovn-combined-ca-bundle\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316295 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316311 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-bootstrap-combined-ca-bundle\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316337 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316362 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-nova-combined-ca-bundle\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.316378 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-neutron-metadata-combined-ca-bundle\") pod \"718531e1-453b-4e16-a497-d3af7c97b9ed\" (UID: \"718531e1-453b-4e16-a497-d3af7c97b9ed\") "
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.321954 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.322331 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.322661 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.323235 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.323416 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.325901 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-kube-api-access-qmrpw" (OuterVolumeSpecName: "kube-api-access-qmrpw") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "kube-api-access-qmrpw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.325926 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.326018 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.326034 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.326073 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.326761 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ceph" (OuterVolumeSpecName: "ceph") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.344853 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-inventory" (OuterVolumeSpecName: "inventory") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.348884 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "718531e1-453b-4e16-a497-d3af7c97b9ed" (UID: "718531e1-453b-4e16-a497-d3af7c97b9ed"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419818 4943 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419857 4943 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419869 4943 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419879 4943 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419889 4943 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419898 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ceph\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419906 4943 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419915 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419925 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419933 4943 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419942 4943 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419949 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmrpw\" (UniqueName: \"kubernetes.io/projected/718531e1-453b-4e16-a497-d3af7c97b9ed-kube-api-access-qmrpw\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.419958 4943 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718531e1-453b-4e16-a497-d3af7c97b9ed-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.766796 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89" event={"ID":"718531e1-453b-4e16-a497-d3af7c97b9ed","Type":"ContainerDied","Data":"6d81dc0b90696450cf603188df4c8053c45694d284b730a3a17d0a472400cea0"}
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.766849 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gwj89"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.766874 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d81dc0b90696450cf603188df4c8053c45694d284b730a3a17d0a472400cea0"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.886693 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"]
Nov 29 07:39:25 crc kubenswrapper[4943]: E1129 07:39:25.887302 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="718531e1-453b-4e16-a497-d3af7c97b9ed" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.887340 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="718531e1-453b-4e16-a497-d3af7c97b9ed" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.887826 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="718531e1-453b-4e16-a497-d3af7c97b9ed" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.889472 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
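That completes one full job-pod lifecycle for install-certs-edpm-deployment-openstack-edpm-ipam-gwj89: ContainerStarted for the sandbox and the job container, ContainerDied with exitCode=0, volume teardown, and the next job pod (ceph-client) being ADDed. Pairing the PLEG Started/Died events by container ID gives per-container runtimes; here container 6659aeef... started at 07:38:42.341818 and died at 07:39:23.748665, roughly 41 seconds of execution. A hypothetical sketch of that pairing (assumes the klog timestamp layout used in these journal lines):

// plegtimes.go: pair PLEG ContainerStarted/ContainerDied events by container ID
// and print how long each container ran, given kubelet journal lines on stdin.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"time"
)

func main() {
	// Capture the klog wall-clock time, the PLEG event type, and the 64-hex container ID.
	re := regexp.MustCompile(`(\d{2}:\d{2}:\d{2}\.\d{6}).*"Type":"(ContainerStarted|ContainerDied)","Data":"([0-9a-f]{64})"`)
	started := map[string]time.Time{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024)
	for sc.Scan() {
		m := re.FindStringSubmatch(sc.Text())
		if m == nil {
			continue
		}
		t, err := time.Parse("15:04:05.000000", m[1])
		if err != nil {
			continue
		}
		if m[2] == "ContainerStarted" {
			started[m[3]] = t
		} else if s, ok := started[m[3]]; ok {
			fmt.Printf("%s ran for %v\n", m[3][:12], t.Sub(s))
		}
	}
}

Died events with no recorded Started (for example, sandboxes created before the log window) are simply skipped.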
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.894208 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.894302 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"]
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.894353 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.894416 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.894416 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 07:39:25 crc kubenswrapper[4943]: I1129 07:39:25.894740 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.030283 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.030349 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzgzw\" (UniqueName: \"kubernetes.io/projected/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-kube-api-access-mzgzw\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.030667 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.030733 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.132975 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.133020 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.133105 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.133144 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzgzw\" (UniqueName: \"kubernetes.io/projected/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-kube-api-access-mzgzw\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.136806 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.136926 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.137646 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.153655 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzgzw\" (UniqueName: \"kubernetes.io/projected/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-kube-api-access-mzgzw\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.211389 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:26 crc kubenswrapper[4943]: I1129 07:39:26.745813 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"]
Nov 29 07:39:27 crc kubenswrapper[4943]: I1129 07:39:27.786843 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb" event={"ID":"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff","Type":"ContainerStarted","Data":"dea163f4f1e91b521cd581a04e8d33c312b5be95e1458821ec631882168e8591"}
Nov 29 07:39:28 crc kubenswrapper[4943]: I1129 07:39:28.799900 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb" event={"ID":"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff","Type":"ContainerStarted","Data":"0efd6b246bc3a94e48c8e1fcf08cefe623a175917f771e67a1e5bdf6d9dff5de"}
Nov 29 07:39:28 crc kubenswrapper[4943]: I1129 07:39:28.828529 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb" podStartSLOduration=3.386608411 podStartE2EDuration="3.828498824s" podCreationTimestamp="2025-11-29 07:39:25 +0000 UTC" firstStartedPulling="2025-11-29 07:39:27.23862718 +0000 UTC m=+3942.168715933" lastFinishedPulling="2025-11-29 07:39:27.680517593 +0000 UTC m=+3942.610606346" observedRunningTime="2025-11-29 07:39:28.821776938 +0000 UTC m=+3943.751865691" watchObservedRunningTime="2025-11-29 07:39:28.828498824 +0000 UTC m=+3943.758587607"
Nov 29 07:39:32 crc kubenswrapper[4943]: I1129 07:39:32.328553 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:39:32 crc kubenswrapper[4943]: E1129 07:39:32.329277 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:39:34 crc kubenswrapper[4943]: I1129 07:39:34.876740 4943 generic.go:334] "Generic (PLEG): container finished" podID="ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff" containerID="0efd6b246bc3a94e48c8e1fcf08cefe623a175917f771e67a1e5bdf6d9dff5de" exitCode=0
Nov 29 07:39:34 crc kubenswrapper[4943]: I1129 07:39:34.876828 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb" event={"ID":"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff","Type":"ContainerDied","Data":"0efd6b246bc3a94e48c8e1fcf08cefe623a175917f771e67a1e5bdf6d9dff5de"}
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.306058 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.443010 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ssh-key\") pod \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") "
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.443253 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ceph\") pod \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") "
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.443342 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-inventory\") pod \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") "
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.443428 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzgzw\" (UniqueName: \"kubernetes.io/projected/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-kube-api-access-mzgzw\") pod \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\" (UID: \"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff\") "
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.449097 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-kube-api-access-mzgzw" (OuterVolumeSpecName: "kube-api-access-mzgzw") pod "ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff" (UID: "ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff"). InnerVolumeSpecName "kube-api-access-mzgzw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.449625 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ceph" (OuterVolumeSpecName: "ceph") pod "ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff" (UID: "ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.470551 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff" (UID: "ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.472608 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-inventory" (OuterVolumeSpecName: "inventory") pod "ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff" (UID: "ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.545970 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ceph\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.546213 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.546273 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzgzw\" (UniqueName: \"kubernetes.io/projected/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-kube-api-access-mzgzw\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.546327 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.896626 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb" event={"ID":"ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff","Type":"ContainerDied","Data":"dea163f4f1e91b521cd581a04e8d33c312b5be95e1458821ec631882168e8591"}
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.896770 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb"
Nov 29 07:39:36 crc kubenswrapper[4943]: I1129 07:39:36.896949 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dea163f4f1e91b521cd581a04e8d33c312b5be95e1458821ec631882168e8591"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.064336 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"]
Nov 29 07:39:37 crc kubenswrapper[4943]: E1129 07:39:37.064768 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.064792 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.065011 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.065792 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
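The pod_startup_latency_tracker entry above for ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb is internally consistent and shows how its two durations relate: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, 07:39:28.828498824 - 07:39:25 = 3.828498824s, while podStartSLOduration subtracts the image-pull window, 3.828498824 - (07:39:27.680517593 - 07:39:27.23862718) = 3.828498824 - 0.441890413 = 3.386608411s. In other words, the SLO figure is startup latency excluding image-pull time; the same arithmetic holds for the other "Observed pod startup duration" entries in this log.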
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.069402 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.069752 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.069967 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.070390 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.070501 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.070640 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.079715 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"]
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.163527 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.163853 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.164098 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.164269 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8826k\" (UniqueName: \"kubernetes.io/projected/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-kube-api-access-8826k\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.164424 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.164499 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.266693 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.267122 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.267443 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.267774 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.267995 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8826k\" (UniqueName: \"kubernetes.io/projected/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-kube-api-access-8826k\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.268271 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.268762 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.274923 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.276260 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.282629 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.286475 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.300523 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8826k\" (UniqueName: \"kubernetes.io/projected/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-kube-api-access-8826k\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-m4thk\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.381360 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
Nov 29 07:39:37 crc kubenswrapper[4943]: I1129 07:39:37.954213 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"]
Nov 29 07:39:38 crc kubenswrapper[4943]: I1129 07:39:38.918603 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk" event={"ID":"7bcc7c5b-722b-40fe-a07e-789d7abf95b2","Type":"ContainerStarted","Data":"323a046561b801aa62f2113f588ad1ac0a177a10819cb3fa8df3b8fac45924a5"}
Nov 29 07:39:38 crc kubenswrapper[4943]: I1129 07:39:38.918867 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk" event={"ID":"7bcc7c5b-722b-40fe-a07e-789d7abf95b2","Type":"ContainerStarted","Data":"124f2c275cd927f689721fbf7b8cade62f9588e052d786108472be47f4d884c2"}
Nov 29 07:39:38 crc kubenswrapper[4943]: I1129 07:39:38.939547 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk" podStartSLOduration=1.464682961 podStartE2EDuration="1.939530073s" podCreationTimestamp="2025-11-29 07:39:37 +0000 UTC" firstStartedPulling="2025-11-29 07:39:37.960108898 +0000 UTC m=+3952.890197641" lastFinishedPulling="2025-11-29 07:39:38.43495599 +0000 UTC m=+3953.365044753" observedRunningTime="2025-11-29 07:39:38.933522005 +0000 UTC m=+3953.863610768" watchObservedRunningTime="2025-11-29 07:39:38.939530073 +0000 UTC m=+3953.869618826"
Nov 29 07:39:44 crc kubenswrapper[4943]: I1129 07:39:44.328145 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:39:44 crc kubenswrapper[4943]: E1129 07:39:44.330141 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:39:57 crc kubenswrapper[4943]: I1129 07:39:57.327820 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:39:57 crc kubenswrapper[4943]: E1129 07:39:57.329740 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:40:11 crc kubenswrapper[4943]: I1129 07:40:11.327498 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:40:11 crc kubenswrapper[4943]: E1129 07:40:11.328320 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:40:24 crc kubenswrapper[4943]: I1129 07:40:24.327322 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:40:24 crc kubenswrapper[4943]: E1129 07:40:24.328086 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:40:36 crc kubenswrapper[4943]: I1129 07:40:36.327871 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:40:36 crc kubenswrapper[4943]: E1129 07:40:36.329971 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:40:51 crc kubenswrapper[4943]: I1129 07:40:51.327346 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09"
Nov 29 07:40:51 crc kubenswrapper[4943]: E1129 07:40:51.328150 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:40:53 crc kubenswrapper[4943]: I1129 07:40:53.560687 4943 scope.go:117] "RemoveContainer" containerID="c93d5eaba4cacb198593c1588189bc5219d27fa0c44d2312e45cc3b24fb77e6c"
Nov 29 07:40:53 crc kubenswrapper[4943]: I1129 07:40:53.620119 4943 scope.go:117] "RemoveContainer" containerID="2a915e8ca1fd9e360b72dae899dc39e7ece272c39ea4c0800989d4ef94ecbc5b"
Nov 29 07:40:53 crc kubenswrapper[4943]: I1129 07:40:53.693185 4943 scope.go:117] "RemoveContainer" containerID="2c29ed709cb7f27cefa0c417b95089ce2453cb75f396008617f78dfd2d9b407c"
Nov 29 07:41:03 crc kubenswrapper[4943]: I1129 07:41:03.793555 4943 generic.go:334] "Generic (PLEG): container finished" podID="7bcc7c5b-722b-40fe-a07e-789d7abf95b2" containerID="323a046561b801aa62f2113f588ad1ac0a177a10819cb3fa8df3b8fac45924a5" exitCode=0
Nov 29 07:41:03 crc kubenswrapper[4943]: I1129 07:41:03.793675 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk" event={"ID":"7bcc7c5b-722b-40fe-a07e-789d7abf95b2","Type":"ContainerDied","Data":"323a046561b801aa62f2113f588ad1ac0a177a10819cb3fa8df3b8fac45924a5"}
Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.343202 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk"
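The machine-config-daemon entries repeating through this stretch are all refused with the same CrashLoopBackOff message: the container has failed often enough that its restart delay sits at the ceiling, which is what "back-off 5m0s" reports. A small sketch of the schedule that produces that ceiling, assuming the documented kubelet defaults (10s initial delay, doubled per restart, capped at five minutes):

// backoff.go: print the assumed crash-loop restart schedule (10s base,
// doubling, 5m cap) that ends in the "back-off 5m0s" messages above.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 10 * time.Second        // assumed default initial backoff
	const maxDelay = 5 * time.Minute // the ceiling reported as "back-off 5m0s"
	for restart := 1; restart <= 8; restart++ {
		fmt.Printf("restart %d: wait %v\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}

Until the window expires, or the container runs cleanly long enough for the counter to reset, each sync attempt logs the same RemoveContainer / "Error syncing pod" pair seen above.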
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.477796 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovncontroller-config-0\") pod \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.478592 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ceph\") pod \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.478753 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ssh-key\") pod \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.478841 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovn-combined-ca-bundle\") pod \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.478871 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8826k\" (UniqueName: \"kubernetes.io/projected/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-kube-api-access-8826k\") pod \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.478920 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-inventory\") pod \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\" (UID: \"7bcc7c5b-722b-40fe-a07e-789d7abf95b2\") " Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.490551 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ceph" (OuterVolumeSpecName: "ceph") pod "7bcc7c5b-722b-40fe-a07e-789d7abf95b2" (UID: "7bcc7c5b-722b-40fe-a07e-789d7abf95b2"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.490590 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "7bcc7c5b-722b-40fe-a07e-789d7abf95b2" (UID: "7bcc7c5b-722b-40fe-a07e-789d7abf95b2"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.496919 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-kube-api-access-8826k" (OuterVolumeSpecName: "kube-api-access-8826k") pod "7bcc7c5b-722b-40fe-a07e-789d7abf95b2" (UID: "7bcc7c5b-722b-40fe-a07e-789d7abf95b2"). InnerVolumeSpecName "kube-api-access-8826k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.509448 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "7bcc7c5b-722b-40fe-a07e-789d7abf95b2" (UID: "7bcc7c5b-722b-40fe-a07e-789d7abf95b2"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.511234 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-inventory" (OuterVolumeSpecName: "inventory") pod "7bcc7c5b-722b-40fe-a07e-789d7abf95b2" (UID: "7bcc7c5b-722b-40fe-a07e-789d7abf95b2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.516361 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7bcc7c5b-722b-40fe-a07e-789d7abf95b2" (UID: "7bcc7c5b-722b-40fe-a07e-789d7abf95b2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.582719 4943 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.582915 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.582936 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.582983 4943 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.583009 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8826k\" (UniqueName: \"kubernetes.io/projected/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-kube-api-access-8826k\") on node \"crc\" DevicePath \"\"" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.583071 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bcc7c5b-722b-40fe-a07e-789d7abf95b2-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.811551 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk" event={"ID":"7bcc7c5b-722b-40fe-a07e-789d7abf95b2","Type":"ContainerDied","Data":"124f2c275cd927f689721fbf7b8cade62f9588e052d786108472be47f4d884c2"} Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 07:41:05.811861 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="124f2c275cd927f689721fbf7b8cade62f9588e052d786108472be47f4d884c2" Nov 29 07:41:05 crc kubenswrapper[4943]: I1129 
07:41:05.811619 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-m4thk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.022329 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk"] Nov 29 07:41:06 crc kubenswrapper[4943]: E1129 07:41:06.022795 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcc7c5b-722b-40fe-a07e-789d7abf95b2" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.022816 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcc7c5b-722b-40fe-a07e-789d7abf95b2" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.023030 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bcc7c5b-722b-40fe-a07e-789d7abf95b2" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.023811 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.027089 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.027147 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.028479 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.028730 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.028519 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.029001 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.030762 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.054865 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk"] Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.193869 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.193905 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.193937 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.194018 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.194098 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.194144 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.194171 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s697r\" (UniqueName: \"kubernetes.io/projected/5d1fa725-e02d-4a45-b50b-2879d165555b-kube-api-access-s697r\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.295912 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.296004 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s697r\" (UniqueName: \"kubernetes.io/projected/5d1fa725-e02d-4a45-b50b-2879d165555b-kube-api-access-s697r\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.296134 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.296174 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.296228 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.296298 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.296426 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.326925 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:41:06 crc kubenswrapper[4943]: E1129 07:41:06.327248 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.734174 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.734257 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.734305 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.747494 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.747875 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s697r\" (UniqueName: \"kubernetes.io/projected/5d1fa725-e02d-4a45-b50b-2879d165555b-kube-api-access-s697r\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.748051 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.748416 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:06 crc kubenswrapper[4943]: I1129 07:41:06.958048 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:41:07 crc kubenswrapper[4943]: I1129 07:41:07.494469 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk"] Nov 29 07:41:07 crc kubenswrapper[4943]: I1129 07:41:07.833229 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" event={"ID":"5d1fa725-e02d-4a45-b50b-2879d165555b","Type":"ContainerStarted","Data":"0f3a2f5cab1c238460f2b6a8687698620dcb7fd149dff90ac506562de59fc3c1"} Nov 29 07:41:08 crc kubenswrapper[4943]: I1129 07:41:08.846673 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" event={"ID":"5d1fa725-e02d-4a45-b50b-2879d165555b","Type":"ContainerStarted","Data":"9884a6b9fd0779cda21262cd5f2df8a9c29b57f6ca7eb3a06275f30ec268771c"} Nov 29 07:41:08 crc kubenswrapper[4943]: I1129 07:41:08.880256 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" podStartSLOduration=3.3750374499999998 podStartE2EDuration="3.880235957s" podCreationTimestamp="2025-11-29 07:41:05 +0000 UTC" firstStartedPulling="2025-11-29 07:41:07.50050418 +0000 UTC m=+4042.430592933" lastFinishedPulling="2025-11-29 07:41:08.005702687 +0000 UTC m=+4042.935791440" observedRunningTime="2025-11-29 07:41:08.872545359 +0000 UTC m=+4043.802634132" watchObservedRunningTime="2025-11-29 07:41:08.880235957 +0000 UTC m=+4043.810324710" Nov 29 07:41:21 crc kubenswrapper[4943]: I1129 07:41:21.328333 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:41:21 crc kubenswrapper[4943]: E1129 07:41:21.329347 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:41:34 crc kubenswrapper[4943]: I1129 07:41:34.328280 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:41:34 crc kubenswrapper[4943]: E1129 07:41:34.329432 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:41:47 crc kubenswrapper[4943]: I1129 07:41:47.327992 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:41:47 crc kubenswrapper[4943]: E1129 07:41:47.329096 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:42:00 crc kubenswrapper[4943]: I1129 07:42:00.328708 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:42:00 crc kubenswrapper[4943]: E1129 07:42:00.330016 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:42:12 crc kubenswrapper[4943]: I1129 07:42:12.327854 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:42:12 crc kubenswrapper[4943]: E1129 07:42:12.328795 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:42:24 crc kubenswrapper[4943]: I1129 07:42:24.328175 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:42:24 crc kubenswrapper[4943]: E1129 07:42:24.329215 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:42:26 crc kubenswrapper[4943]: I1129 07:42:26.601013 4943 generic.go:334] "Generic (PLEG): container finished" podID="5d1fa725-e02d-4a45-b50b-2879d165555b" containerID="9884a6b9fd0779cda21262cd5f2df8a9c29b57f6ca7eb3a06275f30ec268771c" exitCode=0 Nov 29 07:42:26 crc kubenswrapper[4943]: I1129 07:42:26.601102 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" event={"ID":"5d1fa725-e02d-4a45-b50b-2879d165555b","Type":"ContainerDied","Data":"9884a6b9fd0779cda21262cd5f2df8a9c29b57f6ca7eb3a06275f30ec268771c"} Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.044238 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.214344 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-inventory\") pod \"5d1fa725-e02d-4a45-b50b-2879d165555b\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.214454 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-nova-metadata-neutron-config-0\") pod \"5d1fa725-e02d-4a45-b50b-2879d165555b\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.214520 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s697r\" (UniqueName: \"kubernetes.io/projected/5d1fa725-e02d-4a45-b50b-2879d165555b-kube-api-access-s697r\") pod \"5d1fa725-e02d-4a45-b50b-2879d165555b\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.214642 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-metadata-combined-ca-bundle\") pod \"5d1fa725-e02d-4a45-b50b-2879d165555b\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.214701 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ceph\") pod \"5d1fa725-e02d-4a45-b50b-2879d165555b\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.214731 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ssh-key\") pod \"5d1fa725-e02d-4a45-b50b-2879d165555b\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.214832 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-ovn-metadata-agent-neutron-config-0\") pod \"5d1fa725-e02d-4a45-b50b-2879d165555b\" (UID: \"5d1fa725-e02d-4a45-b50b-2879d165555b\") " Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.220535 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ceph" (OuterVolumeSpecName: "ceph") pod "5d1fa725-e02d-4a45-b50b-2879d165555b" (UID: "5d1fa725-e02d-4a45-b50b-2879d165555b"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.220675 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d1fa725-e02d-4a45-b50b-2879d165555b-kube-api-access-s697r" (OuterVolumeSpecName: "kube-api-access-s697r") pod "5d1fa725-e02d-4a45-b50b-2879d165555b" (UID: "5d1fa725-e02d-4a45-b50b-2879d165555b"). InnerVolumeSpecName "kube-api-access-s697r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.221031 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "5d1fa725-e02d-4a45-b50b-2879d165555b" (UID: "5d1fa725-e02d-4a45-b50b-2879d165555b"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.240888 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5d1fa725-e02d-4a45-b50b-2879d165555b" (UID: "5d1fa725-e02d-4a45-b50b-2879d165555b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.242012 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "5d1fa725-e02d-4a45-b50b-2879d165555b" (UID: "5d1fa725-e02d-4a45-b50b-2879d165555b"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.244064 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-inventory" (OuterVolumeSpecName: "inventory") pod "5d1fa725-e02d-4a45-b50b-2879d165555b" (UID: "5d1fa725-e02d-4a45-b50b-2879d165555b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.244415 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "5d1fa725-e02d-4a45-b50b-2879d165555b" (UID: "5d1fa725-e02d-4a45-b50b-2879d165555b"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.316702 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.316736 4943 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.316747 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s697r\" (UniqueName: \"kubernetes.io/projected/5d1fa725-e02d-4a45-b50b-2879d165555b-kube-api-access-s697r\") on node \"crc\" DevicePath \"\"" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.316758 4943 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.316768 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.316776 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.316787 4943 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d1fa725-e02d-4a45-b50b-2879d165555b-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.618128 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" event={"ID":"5d1fa725-e02d-4a45-b50b-2879d165555b","Type":"ContainerDied","Data":"0f3a2f5cab1c238460f2b6a8687698620dcb7fd149dff90ac506562de59fc3c1"} Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.618179 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f3a2f5cab1c238460f2b6a8687698620dcb7fd149dff90ac506562de59fc3c1" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.618172 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.723094 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w"] Nov 29 07:42:28 crc kubenswrapper[4943]: E1129 07:42:28.723426 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d1fa725-e02d-4a45-b50b-2879d165555b" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.723443 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d1fa725-e02d-4a45-b50b-2879d165555b" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.723671 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d1fa725-e02d-4a45-b50b-2879d165555b" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.724329 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.727091 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.727121 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.727444 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.727470 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-thpkm" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.727711 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.729139 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.740385 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w"] Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.825376 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.825728 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.825794 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: 
\"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.825918 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.826042 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zgkb\" (UniqueName: \"kubernetes.io/projected/40fddfb3-2d70-4d01-9da7-9c2a718c4962-kube-api-access-4zgkb\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.826138 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.927936 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.927996 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.928047 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.928069 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.928307 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zgkb\" (UniqueName: 
\"kubernetes.io/projected/40fddfb3-2d70-4d01-9da7-9c2a718c4962-kube-api-access-4zgkb\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.928367 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.932718 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.932729 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.933777 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.933905 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.934006 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:28 crc kubenswrapper[4943]: I1129 07:42:28.950251 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zgkb\" (UniqueName: \"kubernetes.io/projected/40fddfb3-2d70-4d01-9da7-9c2a718c4962-kube-api-access-4zgkb\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-42f5w\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:29 crc kubenswrapper[4943]: I1129 07:42:29.043809 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:42:29 crc kubenswrapper[4943]: I1129 07:42:29.575971 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w"] Nov 29 07:42:29 crc kubenswrapper[4943]: I1129 07:42:29.588191 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:42:29 crc kubenswrapper[4943]: I1129 07:42:29.632247 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" event={"ID":"40fddfb3-2d70-4d01-9da7-9c2a718c4962","Type":"ContainerStarted","Data":"28e988ba59ee572879f1e12086d94062d6d479e793139e1964ba1d3d326e70ed"} Nov 29 07:42:31 crc kubenswrapper[4943]: I1129 07:42:31.653514 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" event={"ID":"40fddfb3-2d70-4d01-9da7-9c2a718c4962","Type":"ContainerStarted","Data":"2324b67be25a6c8acfbd5ac2874e7ec72ccba9c7e5e6d55df9690096c9ce6aa6"} Nov 29 07:42:31 crc kubenswrapper[4943]: I1129 07:42:31.678290 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" podStartSLOduration=2.82585808 podStartE2EDuration="3.678266038s" podCreationTimestamp="2025-11-29 07:42:28 +0000 UTC" firstStartedPulling="2025-11-29 07:42:29.587937812 +0000 UTC m=+4124.518026565" lastFinishedPulling="2025-11-29 07:42:30.44034573 +0000 UTC m=+4125.370434523" observedRunningTime="2025-11-29 07:42:31.677354625 +0000 UTC m=+4126.607443428" watchObservedRunningTime="2025-11-29 07:42:31.678266038 +0000 UTC m=+4126.608354811" Nov 29 07:42:36 crc kubenswrapper[4943]: I1129 07:42:36.327899 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:42:36 crc kubenswrapper[4943]: E1129 07:42:36.328731 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.327765 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:42:50 crc kubenswrapper[4943]: E1129 07:42:50.329700 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.740730 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c5tkv"] Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.743468 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.751773 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c5tkv"] Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.877012 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pphpl\" (UniqueName: \"kubernetes.io/projected/0e202b00-aad4-49dd-91c6-75c583605a5e-kube-api-access-pphpl\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.877120 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-catalog-content\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.877228 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-utilities\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.978578 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pphpl\" (UniqueName: \"kubernetes.io/projected/0e202b00-aad4-49dd-91c6-75c583605a5e-kube-api-access-pphpl\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.978663 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-catalog-content\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.978710 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-utilities\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.979114 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-catalog-content\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:50 crc kubenswrapper[4943]: I1129 07:42:50.979191 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-utilities\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:51 crc kubenswrapper[4943]: I1129 07:42:51.143542 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pphpl\" (UniqueName: \"kubernetes.io/projected/0e202b00-aad4-49dd-91c6-75c583605a5e-kube-api-access-pphpl\") pod \"redhat-operators-c5tkv\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:51 crc kubenswrapper[4943]: I1129 07:42:51.375129 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:42:51 crc kubenswrapper[4943]: I1129 07:42:51.845162 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c5tkv"] Nov 29 07:42:52 crc kubenswrapper[4943]: I1129 07:42:52.864616 4943 generic.go:334] "Generic (PLEG): container finished" podID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerID="d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff" exitCode=0 Nov 29 07:42:52 crc kubenswrapper[4943]: I1129 07:42:52.864704 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5tkv" event={"ID":"0e202b00-aad4-49dd-91c6-75c583605a5e","Type":"ContainerDied","Data":"d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff"} Nov 29 07:42:52 crc kubenswrapper[4943]: I1129 07:42:52.865009 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5tkv" event={"ID":"0e202b00-aad4-49dd-91c6-75c583605a5e","Type":"ContainerStarted","Data":"8d29a0fff72dc55a5aa2505b0b3c49391a4b202f4fde3db07446f234c3ff2632"} Nov 29 07:42:54 crc kubenswrapper[4943]: I1129 07:42:54.883238 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5tkv" event={"ID":"0e202b00-aad4-49dd-91c6-75c583605a5e","Type":"ContainerStarted","Data":"52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e"} Nov 29 07:42:56 crc kubenswrapper[4943]: I1129 07:42:56.908422 4943 generic.go:334] "Generic (PLEG): container finished" podID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerID="52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e" exitCode=0 Nov 29 07:42:56 crc kubenswrapper[4943]: I1129 07:42:56.908559 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5tkv" event={"ID":"0e202b00-aad4-49dd-91c6-75c583605a5e","Type":"ContainerDied","Data":"52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e"} Nov 29 07:42:57 crc kubenswrapper[4943]: I1129 07:42:57.922973 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5tkv" event={"ID":"0e202b00-aad4-49dd-91c6-75c583605a5e","Type":"ContainerStarted","Data":"3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea"} Nov 29 07:42:57 crc kubenswrapper[4943]: I1129 07:42:57.940140 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c5tkv" podStartSLOduration=3.129834295 podStartE2EDuration="7.940113886s" podCreationTimestamp="2025-11-29 07:42:50 +0000 UTC" firstStartedPulling="2025-11-29 07:42:52.866737808 +0000 UTC m=+4147.796826591" lastFinishedPulling="2025-11-29 07:42:57.677017409 +0000 UTC m=+4152.607106182" observedRunningTime="2025-11-29 07:42:57.938990878 +0000 UTC m=+4152.869079651" watchObservedRunningTime="2025-11-29 07:42:57.940113886 +0000 UTC m=+4152.870202649" Nov 29 07:43:01 crc kubenswrapper[4943]: I1129 07:43:01.375283 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 
07:43:01 crc kubenswrapper[4943]: I1129 07:43:01.375647 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:43:02 crc kubenswrapper[4943]: I1129 07:43:02.421737 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c5tkv" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="registry-server" probeResult="failure" output=< Nov 29 07:43:02 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 07:43:02 crc kubenswrapper[4943]: > Nov 29 07:43:04 crc kubenswrapper[4943]: I1129 07:43:04.327923 4943 scope.go:117] "RemoveContainer" containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:43:04 crc kubenswrapper[4943]: I1129 07:43:04.992392 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"0932f3487aa207682e80c231bc7cf33b98e5304d94ede4b5eb1707bda3ae329d"} Nov 29 07:43:11 crc kubenswrapper[4943]: I1129 07:43:11.442548 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:43:11 crc kubenswrapper[4943]: I1129 07:43:11.498735 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:43:11 crc kubenswrapper[4943]: I1129 07:43:11.691538 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c5tkv"] Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.065256 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c5tkv" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="registry-server" containerID="cri-o://3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea" gracePeriod=2 Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.594578 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.655877 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pphpl\" (UniqueName: \"kubernetes.io/projected/0e202b00-aad4-49dd-91c6-75c583605a5e-kube-api-access-pphpl\") pod \"0e202b00-aad4-49dd-91c6-75c583605a5e\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.655969 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-catalog-content\") pod \"0e202b00-aad4-49dd-91c6-75c583605a5e\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.656011 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-utilities\") pod \"0e202b00-aad4-49dd-91c6-75c583605a5e\" (UID: \"0e202b00-aad4-49dd-91c6-75c583605a5e\") " Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.657039 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-utilities" (OuterVolumeSpecName: "utilities") pod "0e202b00-aad4-49dd-91c6-75c583605a5e" (UID: "0e202b00-aad4-49dd-91c6-75c583605a5e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.662176 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e202b00-aad4-49dd-91c6-75c583605a5e-kube-api-access-pphpl" (OuterVolumeSpecName: "kube-api-access-pphpl") pod "0e202b00-aad4-49dd-91c6-75c583605a5e" (UID: "0e202b00-aad4-49dd-91c6-75c583605a5e"). InnerVolumeSpecName "kube-api-access-pphpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.758903 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pphpl\" (UniqueName: \"kubernetes.io/projected/0e202b00-aad4-49dd-91c6-75c583605a5e-kube-api-access-pphpl\") on node \"crc\" DevicePath \"\"" Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.758936 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.781984 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e202b00-aad4-49dd-91c6-75c583605a5e" (UID: "0e202b00-aad4-49dd-91c6-75c583605a5e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:43:13 crc kubenswrapper[4943]: I1129 07:43:13.861086 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e202b00-aad4-49dd-91c6-75c583605a5e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.076476 4943 generic.go:334] "Generic (PLEG): container finished" podID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerID="3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea" exitCode=0 Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.076529 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5tkv" event={"ID":"0e202b00-aad4-49dd-91c6-75c583605a5e","Type":"ContainerDied","Data":"3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea"} Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.076594 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5tkv" event={"ID":"0e202b00-aad4-49dd-91c6-75c583605a5e","Type":"ContainerDied","Data":"8d29a0fff72dc55a5aa2505b0b3c49391a4b202f4fde3db07446f234c3ff2632"} Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.076622 4943 scope.go:117] "RemoveContainer" containerID="3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.077897 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c5tkv" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.108022 4943 scope.go:117] "RemoveContainer" containerID="52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.129026 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c5tkv"] Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.136488 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c5tkv"] Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.154110 4943 scope.go:117] "RemoveContainer" containerID="d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.194878 4943 scope.go:117] "RemoveContainer" containerID="3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea" Nov 29 07:43:14 crc kubenswrapper[4943]: E1129 07:43:14.195445 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea\": container with ID starting with 3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea not found: ID does not exist" containerID="3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.195503 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea"} err="failed to get container status \"3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea\": rpc error: code = NotFound desc = could not find container \"3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea\": container with ID starting with 3ee7a8a772c77443f4dff2be568e0aef2f9cebdbf4380dce8ea6e21c9a2c37ea not found: ID does not exist" Nov 29 07:43:14 crc 
kubenswrapper[4943]: I1129 07:43:14.195540 4943 scope.go:117] "RemoveContainer" containerID="52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e" Nov 29 07:43:14 crc kubenswrapper[4943]: E1129 07:43:14.196103 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e\": container with ID starting with 52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e not found: ID does not exist" containerID="52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.196154 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e"} err="failed to get container status \"52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e\": rpc error: code = NotFound desc = could not find container \"52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e\": container with ID starting with 52954516105ec082122b365b91cd751b92f1ba0498403665ab7c1e5c42b1d63e not found: ID does not exist" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.196195 4943 scope.go:117] "RemoveContainer" containerID="d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff" Nov 29 07:43:14 crc kubenswrapper[4943]: E1129 07:43:14.196818 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff\": container with ID starting with d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff not found: ID does not exist" containerID="d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff" Nov 29 07:43:14 crc kubenswrapper[4943]: I1129 07:43:14.196850 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff"} err="failed to get container status \"d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff\": rpc error: code = NotFound desc = could not find container \"d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff\": container with ID starting with d7e848c5b251207291b773e81cf09931c3660a75db9a82edacec6e3804a1deff not found: ID does not exist" Nov 29 07:43:15 crc kubenswrapper[4943]: I1129 07:43:15.349145 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" path="/var/lib/kubelet/pods/0e202b00-aad4-49dd-91c6-75c583605a5e/volumes" Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.198997 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"] Nov 29 07:45:00 crc kubenswrapper[4943]: E1129 07:45:00.200351 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="extract-utilities" Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.200373 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="extract-utilities" Nov 29 07:45:00 crc kubenswrapper[4943]: E1129 07:45:00.200400 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="registry-server" Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 
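
The RemoveContainer / "ContainerStatus from runtime service failed" pairs above are a benign race: the container was already gone by the time the deletor asked the runtime about it, so CRI-O answers with gRPC NotFound and the kubelet logs the error and moves on. A sketch of the usual way a cleanup path tolerates that (the helper name is hypothetical; status.Code is the standard google.golang.org/grpc accessor):

    // notfound.go, sketch: treat CRI "NotFound" as already-deleted, which is
    // effectively what happens after the errors logged above.
    package cleanup

    import (
    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // alreadyRemoved reports whether err is the gRPC NotFound that CRI-O
    // returns for "container with ID starting with ... not found: ID does
    // not exist".
    func alreadyRemoved(err error) bool {
    	return status.Code(err) == codes.NotFound
    }
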
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.200414 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="registry-server"
Nov 29 07:45:00 crc kubenswrapper[4943]: E1129 07:45:00.200440 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="extract-content"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.200453 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="extract-content"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.200781 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e202b00-aad4-49dd-91c6-75c583605a5e" containerName="registry-server"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.201824 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.204663 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.208121 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.212422 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"]
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.334085 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c0ac0c8-7a37-405b-b60a-6c70680b7972-secret-volume\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.334202 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c0ac0c8-7a37-405b-b60a-6c70680b7972-config-volume\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.334505 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrn6t\" (UniqueName: \"kubernetes.io/projected/6c0ac0c8-7a37-405b-b60a-6c70680b7972-kube-api-access-mrn6t\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.436065 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrn6t\" (UniqueName: \"kubernetes.io/projected/6c0ac0c8-7a37-405b-b60a-6c70680b7972-kube-api-access-mrn6t\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.436149 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c0ac0c8-7a37-405b-b60a-6c70680b7972-secret-volume\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.436206 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c0ac0c8-7a37-405b-b60a-6c70680b7972-config-volume\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.437605 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c0ac0c8-7a37-405b-b60a-6c70680b7972-config-volume\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.451446 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c0ac0c8-7a37-405b-b60a-6c70680b7972-secret-volume\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Nov 29 07:45:00 crc kubenswrapper[4943]: I1129 07:45:00.468841 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrn6t\" (UniqueName: \"kubernetes.io/projected/6c0ac0c8-7a37-405b-b60a-6c70680b7972-kube-api-access-mrn6t\") pod \"collect-profiles-29406705-t49f4\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4" Nov 29 07:45:01 crc kubenswrapper[4943]: I1129 07:45:01.035216 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"] Nov 29 07:45:01 crc kubenswrapper[4943]: I1129 07:45:01.145661 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4" event={"ID":"6c0ac0c8-7a37-405b-b60a-6c70680b7972","Type":"ContainerStarted","Data":"09c30a40d60bf4d93b01cff708746ac56de6e077bced400abdf1b96d9ae00713"} Nov 29 07:45:03 crc kubenswrapper[4943]: I1129 07:45:03.175898 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4" event={"ID":"6c0ac0c8-7a37-405b-b60a-6c70680b7972","Type":"ContainerStarted","Data":"15ee8554059e5d0a40d26906efa29c4151bf9892038e51080c83f1a8f8a779c6"} Nov 29 07:45:04 crc kubenswrapper[4943]: I1129 07:45:04.186059 4943 generic.go:334] "Generic (PLEG): container finished" podID="6c0ac0c8-7a37-405b-b60a-6c70680b7972" containerID="15ee8554059e5d0a40d26906efa29c4151bf9892038e51080c83f1a8f8a779c6" exitCode=0 Nov 29 07:45:04 crc kubenswrapper[4943]: I1129 07:45:04.186173 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4" event={"ID":"6c0ac0c8-7a37-405b-b60a-6c70680b7972","Type":"ContainerDied","Data":"15ee8554059e5d0a40d26906efa29c4151bf9892038e51080c83f1a8f8a779c6"} Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.520680 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4" Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.650124 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c0ac0c8-7a37-405b-b60a-6c70680b7972-config-volume\") pod \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.650228 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrn6t\" (UniqueName: \"kubernetes.io/projected/6c0ac0c8-7a37-405b-b60a-6c70680b7972-kube-api-access-mrn6t\") pod \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.650384 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c0ac0c8-7a37-405b-b60a-6c70680b7972-secret-volume\") pod \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\" (UID: \"6c0ac0c8-7a37-405b-b60a-6c70680b7972\") " Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.651457 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c0ac0c8-7a37-405b-b60a-6c70680b7972-config-volume" (OuterVolumeSpecName: "config-volume") pod "6c0ac0c8-7a37-405b-b60a-6c70680b7972" (UID: "6c0ac0c8-7a37-405b-b60a-6c70680b7972"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.660787 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c0ac0c8-7a37-405b-b60a-6c70680b7972-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6c0ac0c8-7a37-405b-b60a-6c70680b7972" (UID: "6c0ac0c8-7a37-405b-b60a-6c70680b7972"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.660977 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c0ac0c8-7a37-405b-b60a-6c70680b7972-kube-api-access-mrn6t" (OuterVolumeSpecName: "kube-api-access-mrn6t") pod "6c0ac0c8-7a37-405b-b60a-6c70680b7972" (UID: "6c0ac0c8-7a37-405b-b60a-6c70680b7972"). InnerVolumeSpecName "kube-api-access-mrn6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.752741 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c0ac0c8-7a37-405b-b60a-6c70680b7972-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.752787 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c0ac0c8-7a37-405b-b60a-6c70680b7972-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 07:45:05 crc kubenswrapper[4943]: I1129 07:45:05.752802 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrn6t\" (UniqueName: \"kubernetes.io/projected/6c0ac0c8-7a37-405b-b60a-6c70680b7972-kube-api-access-mrn6t\") on node \"crc\" DevicePath \"\"" Nov 29 07:45:06 crc kubenswrapper[4943]: I1129 07:45:06.203482 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4" event={"ID":"6c0ac0c8-7a37-405b-b60a-6c70680b7972","Type":"ContainerDied","Data":"09c30a40d60bf4d93b01cff708746ac56de6e077bced400abdf1b96d9ae00713"} Nov 29 07:45:06 crc kubenswrapper[4943]: I1129 07:45:06.203868 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09c30a40d60bf4d93b01cff708746ac56de6e077bced400abdf1b96d9ae00713" Nov 29 07:45:06 crc kubenswrapper[4943]: I1129 07:45:06.203610 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4" Nov 29 07:45:06 crc kubenswrapper[4943]: I1129 07:45:06.649730 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm"] Nov 29 07:45:06 crc kubenswrapper[4943]: I1129 07:45:06.657333 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406660-bz8wm"] Nov 29 07:45:07 crc kubenswrapper[4943]: I1129 07:45:07.340383 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8324b1a2-5aa0-4ed0-b6d2-91fad49082a7" path="/var/lib/kubelet/pods/8324b1a2-5aa0-4ed0-b6d2-91fad49082a7/volumes" Nov 29 07:45:32 crc kubenswrapper[4943]: I1129 07:45:32.613133 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:45:32 crc kubenswrapper[4943]: I1129 07:45:32.613797 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:45:53 crc kubenswrapper[4943]: I1129 07:45:53.860490 4943 scope.go:117] "RemoveContainer" containerID="c016d22ad0c11f6bdc334ef85079d2713d73350933b18454e601dd1c666496d3" Nov 29 07:45:55 crc kubenswrapper[4943]: I1129 07:45:55.905061 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-khdkq"] Nov 29 07:45:55 crc kubenswrapper[4943]: E1129 07:45:55.905775 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c0ac0c8-7a37-405b-b60a-6c70680b7972" containerName="collect-profiles" Nov 29 07:45:55 crc kubenswrapper[4943]: I1129 07:45:55.905790 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c0ac0c8-7a37-405b-b60a-6c70680b7972" containerName="collect-profiles" Nov 29 07:45:55 crc kubenswrapper[4943]: I1129 07:45:55.905943 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c0ac0c8-7a37-405b-b60a-6c70680b7972" containerName="collect-profiles" Nov 29 07:45:55 crc kubenswrapper[4943]: I1129 07:45:55.907324 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:55 crc kubenswrapper[4943]: I1129 07:45:55.924878 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-khdkq"] Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.010534 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-utilities\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.010634 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghnr2\" (UniqueName: \"kubernetes.io/projected/7fcb9d49-3f25-4220-b870-38a8068414ec-kube-api-access-ghnr2\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.010673 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-catalog-content\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.111721 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-catalog-content\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.112159 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-utilities\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.112222 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghnr2\" (UniqueName: \"kubernetes.io/projected/7fcb9d49-3f25-4220-b870-38a8068414ec-kube-api-access-ghnr2\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.112266 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-catalog-content\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.112548 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-utilities\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.140958 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ghnr2\" (UniqueName: \"kubernetes.io/projected/7fcb9d49-3f25-4220-b870-38a8068414ec-kube-api-access-ghnr2\") pod \"community-operators-khdkq\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.285692 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:45:56 crc kubenswrapper[4943]: I1129 07:45:56.781333 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-khdkq"] Nov 29 07:45:57 crc kubenswrapper[4943]: I1129 07:45:57.715018 4943 generic.go:334] "Generic (PLEG): container finished" podID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerID="8cf3afa644c7360e438d7d07afa57ad1b32fb903895f33f164e9040c0d640aac" exitCode=0 Nov 29 07:45:57 crc kubenswrapper[4943]: I1129 07:45:57.715143 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khdkq" event={"ID":"7fcb9d49-3f25-4220-b870-38a8068414ec","Type":"ContainerDied","Data":"8cf3afa644c7360e438d7d07afa57ad1b32fb903895f33f164e9040c0d640aac"} Nov 29 07:45:57 crc kubenswrapper[4943]: I1129 07:45:57.715402 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khdkq" event={"ID":"7fcb9d49-3f25-4220-b870-38a8068414ec","Type":"ContainerStarted","Data":"64d85216c2306129525b0f45e1d77481e33b6f664e87680185bef10a59d6a8d2"} Nov 29 07:46:00 crc kubenswrapper[4943]: I1129 07:46:00.743176 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khdkq" event={"ID":"7fcb9d49-3f25-4220-b870-38a8068414ec","Type":"ContainerStarted","Data":"39b09d3f6c96293e4e3e64dff603ca9a908595112b0e2f46dee736c3bce7acef"} Nov 29 07:46:01 crc kubenswrapper[4943]: I1129 07:46:01.756274 4943 generic.go:334] "Generic (PLEG): container finished" podID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerID="39b09d3f6c96293e4e3e64dff603ca9a908595112b0e2f46dee736c3bce7acef" exitCode=0 Nov 29 07:46:01 crc kubenswrapper[4943]: I1129 07:46:01.756319 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khdkq" event={"ID":"7fcb9d49-3f25-4220-b870-38a8068414ec","Type":"ContainerDied","Data":"39b09d3f6c96293e4e3e64dff603ca9a908595112b0e2f46dee736c3bce7acef"} Nov 29 07:46:02 crc kubenswrapper[4943]: I1129 07:46:02.613827 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:46:02 crc kubenswrapper[4943]: I1129 07:46:02.614157 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:46:03 crc kubenswrapper[4943]: I1129 07:46:03.775964 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khdkq" event={"ID":"7fcb9d49-3f25-4220-b870-38a8068414ec","Type":"ContainerStarted","Data":"312d0091031e9e59c769345fffaccba17053af9ae5cb6d5e36c2281b6bb77530"} Nov 29 
07:46:03 crc kubenswrapper[4943]: I1129 07:46:03.801833 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-khdkq" podStartSLOduration=3.338778617 podStartE2EDuration="8.801807696s" podCreationTimestamp="2025-11-29 07:45:55 +0000 UTC" firstStartedPulling="2025-11-29 07:45:57.717732756 +0000 UTC m=+4332.647821509" lastFinishedPulling="2025-11-29 07:46:03.180761825 +0000 UTC m=+4338.110850588" observedRunningTime="2025-11-29 07:46:03.793506142 +0000 UTC m=+4338.723594895" watchObservedRunningTime="2025-11-29 07:46:03.801807696 +0000 UTC m=+4338.731896449" Nov 29 07:46:06 crc kubenswrapper[4943]: I1129 07:46:06.286246 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:46:06 crc kubenswrapper[4943]: I1129 07:46:06.286720 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:46:06 crc kubenswrapper[4943]: I1129 07:46:06.339300 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:46:16 crc kubenswrapper[4943]: I1129 07:46:16.356195 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:46:16 crc kubenswrapper[4943]: I1129 07:46:16.430248 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-khdkq"] Nov 29 07:46:16 crc kubenswrapper[4943]: I1129 07:46:16.905549 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-khdkq" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerName="registry-server" containerID="cri-o://312d0091031e9e59c769345fffaccba17053af9ae5cb6d5e36c2281b6bb77530" gracePeriod=2 Nov 29 07:46:17 crc kubenswrapper[4943]: I1129 07:46:17.916902 4943 generic.go:334] "Generic (PLEG): container finished" podID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerID="312d0091031e9e59c769345fffaccba17053af9ae5cb6d5e36c2281b6bb77530" exitCode=0 Nov 29 07:46:17 crc kubenswrapper[4943]: I1129 07:46:17.916991 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khdkq" event={"ID":"7fcb9d49-3f25-4220-b870-38a8068414ec","Type":"ContainerDied","Data":"312d0091031e9e59c769345fffaccba17053af9ae5cb6d5e36c2281b6bb77530"} Nov 29 07:46:17 crc kubenswrapper[4943]: I1129 07:46:17.917265 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khdkq" event={"ID":"7fcb9d49-3f25-4220-b870-38a8068414ec","Type":"ContainerDied","Data":"64d85216c2306129525b0f45e1d77481e33b6f664e87680185bef10a59d6a8d2"} Nov 29 07:46:17 crc kubenswrapper[4943]: I1129 07:46:17.917288 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64d85216c2306129525b0f45e1d77481e33b6f664e87680185bef10a59d6a8d2" Nov 29 07:46:17 crc kubenswrapper[4943]: I1129 07:46:17.982537 4943 util.go:48] "No ready sandbox for pod can be found. 
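
The two durations in the startup-latency line above differ by the image-pull window: podStartE2EDuration runs from podCreationTimestamp (07:45:55) to the observed running time (07:46:03.80), while podStartSLOduration additionally subtracts the time spent pulling images (firstStartedPulling through lastFinishedPulling). A worked check in Go, using the values as printed (the last digit of the SLO figure differs by ~10ns because the printed timestamps are rounded):

    // Sketch: reproduce the SLO arithmetic from the latency-tracker line.
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	parse := func(s string) time.Time {
    		t, _ := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
    		return t
    	}
    	created := parse("2025-11-29 07:45:55 +0000 UTC")
    	running := parse("2025-11-29 07:46:03.801807696 +0000 UTC")
    	pullFrom := parse("2025-11-29 07:45:57.717732756 +0000 UTC")
    	pullTo := parse("2025-11-29 07:46:03.180761825 +0000 UTC")

    	e2e := running.Sub(created)       // 8.801807696s == podStartE2EDuration
    	slo := e2e - pullTo.Sub(pullFrom) // ≈ 3.338778617s == podStartSLOduration
    	fmt.Println(e2e, slo)
    }
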
Need to start a new one" pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.132285 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-utilities\") pod \"7fcb9d49-3f25-4220-b870-38a8068414ec\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.132610 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-catalog-content\") pod \"7fcb9d49-3f25-4220-b870-38a8068414ec\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.132646 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghnr2\" (UniqueName: \"kubernetes.io/projected/7fcb9d49-3f25-4220-b870-38a8068414ec-kube-api-access-ghnr2\") pod \"7fcb9d49-3f25-4220-b870-38a8068414ec\" (UID: \"7fcb9d49-3f25-4220-b870-38a8068414ec\") " Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.133492 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-utilities" (OuterVolumeSpecName: "utilities") pod "7fcb9d49-3f25-4220-b870-38a8068414ec" (UID: "7fcb9d49-3f25-4220-b870-38a8068414ec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.139771 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fcb9d49-3f25-4220-b870-38a8068414ec-kube-api-access-ghnr2" (OuterVolumeSpecName: "kube-api-access-ghnr2") pod "7fcb9d49-3f25-4220-b870-38a8068414ec" (UID: "7fcb9d49-3f25-4220-b870-38a8068414ec"). InnerVolumeSpecName "kube-api-access-ghnr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.180081 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7fcb9d49-3f25-4220-b870-38a8068414ec" (UID: "7fcb9d49-3f25-4220-b870-38a8068414ec"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.234707 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.234737 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fcb9d49-3f25-4220-b870-38a8068414ec-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.234748 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghnr2\" (UniqueName: \"kubernetes.io/projected/7fcb9d49-3f25-4220-b870-38a8068414ec-kube-api-access-ghnr2\") on node \"crc\" DevicePath \"\"" Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.926262 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-khdkq" Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.963229 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-khdkq"] Nov 29 07:46:18 crc kubenswrapper[4943]: I1129 07:46:18.970543 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-khdkq"] Nov 29 07:46:19 crc kubenswrapper[4943]: I1129 07:46:19.342920 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" path="/var/lib/kubelet/pods/7fcb9d49-3f25-4220-b870-38a8068414ec/volumes" Nov 29 07:46:32 crc kubenswrapper[4943]: I1129 07:46:32.612880 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:46:32 crc kubenswrapper[4943]: I1129 07:46:32.613352 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:46:32 crc kubenswrapper[4943]: I1129 07:46:32.613400 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 07:46:32 crc kubenswrapper[4943]: I1129 07:46:32.614187 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0932f3487aa207682e80c231bc7cf33b98e5304d94ede4b5eb1707bda3ae329d"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 07:46:32 crc kubenswrapper[4943]: I1129 07:46:32.614254 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://0932f3487aa207682e80c231bc7cf33b98e5304d94ede4b5eb1707bda3ae329d" gracePeriod=600 Nov 29 07:46:33 crc kubenswrapper[4943]: I1129 07:46:33.051131 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="0932f3487aa207682e80c231bc7cf33b98e5304d94ede4b5eb1707bda3ae329d" exitCode=0 Nov 29 07:46:33 crc kubenswrapper[4943]: I1129 07:46:33.051179 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"0932f3487aa207682e80c231bc7cf33b98e5304d94ede4b5eb1707bda3ae329d"} Nov 29 07:46:33 crc kubenswrapper[4943]: I1129 07:46:33.051517 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4"} Nov 29 07:46:33 crc kubenswrapper[4943]: I1129 07:46:33.051539 4943 scope.go:117] "RemoveContainer" 
containerID="09b07071ff8ca89d679d6fdaab2353a9bf1bf65d571939901c2328edcb5dec09" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.045739 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9dq4s"] Nov 29 07:46:37 crc kubenswrapper[4943]: E1129 07:46:37.047022 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerName="extract-utilities" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.047039 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerName="extract-utilities" Nov 29 07:46:37 crc kubenswrapper[4943]: E1129 07:46:37.047061 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerName="registry-server" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.047067 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerName="registry-server" Nov 29 07:46:37 crc kubenswrapper[4943]: E1129 07:46:37.047086 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerName="extract-content" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.047093 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerName="extract-content" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.047303 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fcb9d49-3f25-4220-b870-38a8068414ec" containerName="registry-server" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.049083 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.098830 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dq4s"] Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.184776 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wd8jp\" (UniqueName: \"kubernetes.io/projected/8359be52-16c0-4133-9f23-634923317b77-kube-api-access-wd8jp\") pod \"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.185112 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-catalog-content\") pod \"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.185256 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-utilities\") pod \"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.286765 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wd8jp\" (UniqueName: \"kubernetes.io/projected/8359be52-16c0-4133-9f23-634923317b77-kube-api-access-wd8jp\") pod 
\"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.286857 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-catalog-content\") pod \"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.286911 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-utilities\") pod \"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.287588 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-utilities\") pod \"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.287684 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-catalog-content\") pod \"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.319024 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wd8jp\" (UniqueName: \"kubernetes.io/projected/8359be52-16c0-4133-9f23-634923317b77-kube-api-access-wd8jp\") pod \"redhat-marketplace-9dq4s\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.378986 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:37 crc kubenswrapper[4943]: I1129 07:46:37.903865 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dq4s"] Nov 29 07:46:38 crc kubenswrapper[4943]: I1129 07:46:38.114688 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dq4s" event={"ID":"8359be52-16c0-4133-9f23-634923317b77","Type":"ContainerStarted","Data":"cceeb853673bf45423ce3f2f760322bb92fbb3c6832113008f0138a7b405fde6"} Nov 29 07:46:39 crc kubenswrapper[4943]: I1129 07:46:39.126888 4943 generic.go:334] "Generic (PLEG): container finished" podID="8359be52-16c0-4133-9f23-634923317b77" containerID="4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae" exitCode=0 Nov 29 07:46:39 crc kubenswrapper[4943]: I1129 07:46:39.126977 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dq4s" event={"ID":"8359be52-16c0-4133-9f23-634923317b77","Type":"ContainerDied","Data":"4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae"} Nov 29 07:46:40 crc kubenswrapper[4943]: I1129 07:46:40.141851 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dq4s" event={"ID":"8359be52-16c0-4133-9f23-634923317b77","Type":"ContainerStarted","Data":"163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396"} Nov 29 07:46:41 crc kubenswrapper[4943]: I1129 07:46:41.155091 4943 generic.go:334] "Generic (PLEG): container finished" podID="8359be52-16c0-4133-9f23-634923317b77" containerID="163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396" exitCode=0 Nov 29 07:46:41 crc kubenswrapper[4943]: I1129 07:46:41.155202 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dq4s" event={"ID":"8359be52-16c0-4133-9f23-634923317b77","Type":"ContainerDied","Data":"163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396"} Nov 29 07:46:42 crc kubenswrapper[4943]: I1129 07:46:42.173276 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dq4s" event={"ID":"8359be52-16c0-4133-9f23-634923317b77","Type":"ContainerStarted","Data":"e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b"} Nov 29 07:46:42 crc kubenswrapper[4943]: I1129 07:46:42.204467 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9dq4s" podStartSLOduration=2.665026857 podStartE2EDuration="5.204438921s" podCreationTimestamp="2025-11-29 07:46:37 +0000 UTC" firstStartedPulling="2025-11-29 07:46:39.1317382 +0000 UTC m=+4374.061826963" lastFinishedPulling="2025-11-29 07:46:41.671150274 +0000 UTC m=+4376.601239027" observedRunningTime="2025-11-29 07:46:42.197217614 +0000 UTC m=+4377.127306387" watchObservedRunningTime="2025-11-29 07:46:42.204438921 +0000 UTC m=+4377.134527674" Nov 29 07:46:47 crc kubenswrapper[4943]: I1129 07:46:47.379882 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:47 crc kubenswrapper[4943]: I1129 07:46:47.380472 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:47 crc kubenswrapper[4943]: I1129 07:46:47.448255 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:48 crc kubenswrapper[4943]: I1129 07:46:48.278324 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:48 crc kubenswrapper[4943]: I1129 07:46:48.325813 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dq4s"] Nov 29 07:46:50 crc kubenswrapper[4943]: I1129 07:46:50.252814 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9dq4s" podUID="8359be52-16c0-4133-9f23-634923317b77" containerName="registry-server" containerID="cri-o://e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b" gracePeriod=2 Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.178524 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.244183 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-utilities\") pod \"8359be52-16c0-4133-9f23-634923317b77\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.244406 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wd8jp\" (UniqueName: \"kubernetes.io/projected/8359be52-16c0-4133-9f23-634923317b77-kube-api-access-wd8jp\") pod \"8359be52-16c0-4133-9f23-634923317b77\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.244510 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-catalog-content\") pod \"8359be52-16c0-4133-9f23-634923317b77\" (UID: \"8359be52-16c0-4133-9f23-634923317b77\") " Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.246468 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-utilities" (OuterVolumeSpecName: "utilities") pod "8359be52-16c0-4133-9f23-634923317b77" (UID: "8359be52-16c0-4133-9f23-634923317b77"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.264488 4943 generic.go:334] "Generic (PLEG): container finished" podID="8359be52-16c0-4133-9f23-634923317b77" containerID="e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b" exitCode=0 Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.264533 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dq4s" event={"ID":"8359be52-16c0-4133-9f23-634923317b77","Type":"ContainerDied","Data":"e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b"} Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.264579 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dq4s" event={"ID":"8359be52-16c0-4133-9f23-634923317b77","Type":"ContainerDied","Data":"cceeb853673bf45423ce3f2f760322bb92fbb3c6832113008f0138a7b405fde6"} Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.264600 4943 scope.go:117] "RemoveContainer" containerID="e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.264728 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9dq4s" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.269148 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8359be52-16c0-4133-9f23-634923317b77" (UID: "8359be52-16c0-4133-9f23-634923317b77"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.288689 4943 scope.go:117] "RemoveContainer" containerID="163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.346543 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.346593 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8359be52-16c0-4133-9f23-634923317b77-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.532430 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8359be52-16c0-4133-9f23-634923317b77-kube-api-access-wd8jp" (OuterVolumeSpecName: "kube-api-access-wd8jp") pod "8359be52-16c0-4133-9f23-634923317b77" (UID: "8359be52-16c0-4133-9f23-634923317b77"). InnerVolumeSpecName "kube-api-access-wd8jp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.546853 4943 scope.go:117] "RemoveContainer" containerID="4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.550488 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wd8jp\" (UniqueName: \"kubernetes.io/projected/8359be52-16c0-4133-9f23-634923317b77-kube-api-access-wd8jp\") on node \"crc\" DevicePath \"\"" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.627943 4943 scope.go:117] "RemoveContainer" containerID="e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b" Nov 29 07:46:51 crc kubenswrapper[4943]: E1129 07:46:51.628542 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b\": container with ID starting with e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b not found: ID does not exist" containerID="e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.628626 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b"} err="failed to get container status \"e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b\": rpc error: code = NotFound desc = could not find container \"e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b\": container with ID starting with e222a2cabddca3735ac19ceadd0290338d31f10a1188eaf2b864497ac71fa54b not found: ID does not exist" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.628659 4943 scope.go:117] "RemoveContainer" containerID="163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396" Nov 29 07:46:51 crc kubenswrapper[4943]: E1129 07:46:51.629054 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396\": container with ID starting with 163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396 not found: ID does not exist" containerID="163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.629075 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396"} err="failed to get container status \"163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396\": rpc error: code = NotFound desc = could not find container \"163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396\": container with ID starting with 163196a1352f4fdb785142335e5a24c3a798ec47d746a1b2b38421e9e7086396 not found: ID does not exist" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.629093 4943 scope.go:117] "RemoveContainer" containerID="4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae" Nov 29 07:46:51 crc kubenswrapper[4943]: E1129 07:46:51.629383 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae\": container with ID starting with 4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae not found: ID does not 
exist" containerID="4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.629410 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae"} err="failed to get container status \"4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae\": rpc error: code = NotFound desc = could not find container \"4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae\": container with ID starting with 4a4c63ff4f2f97a96764cb4d27f3c718dd9e7798769c2f03c63d4e4ed05652ae not found: ID does not exist" Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.675444 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dq4s"] Nov 29 07:46:51 crc kubenswrapper[4943]: I1129 07:46:51.688077 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dq4s"] Nov 29 07:46:53 crc kubenswrapper[4943]: I1129 07:46:53.346633 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8359be52-16c0-4133-9f23-634923317b77" path="/var/lib/kubelet/pods/8359be52-16c0-4133-9f23-634923317b77/volumes" Nov 29 07:47:26 crc kubenswrapper[4943]: I1129 07:47:26.583160 4943 generic.go:334] "Generic (PLEG): container finished" podID="40fddfb3-2d70-4d01-9da7-9c2a718c4962" containerID="2324b67be25a6c8acfbd5ac2874e7ec72ccba9c7e5e6d55df9690096c9ce6aa6" exitCode=0 Nov 29 07:47:26 crc kubenswrapper[4943]: I1129 07:47:26.583282 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" event={"ID":"40fddfb3-2d70-4d01-9da7-9c2a718c4962","Type":"ContainerDied","Data":"2324b67be25a6c8acfbd5ac2874e7ec72ccba9c7e5e6d55df9690096c9ce6aa6"} Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.043863 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.230975 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zgkb\" (UniqueName: \"kubernetes.io/projected/40fddfb3-2d70-4d01-9da7-9c2a718c4962-kube-api-access-4zgkb\") pod \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.231040 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-combined-ca-bundle\") pod \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.231092 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key\") pod \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.231189 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-secret-0\") pod \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.231251 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory\") pod \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.231277 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ceph\") pod \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.240826 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40fddfb3-2d70-4d01-9da7-9c2a718c4962-kube-api-access-4zgkb" (OuterVolumeSpecName: "kube-api-access-4zgkb") pod "40fddfb3-2d70-4d01-9da7-9c2a718c4962" (UID: "40fddfb3-2d70-4d01-9da7-9c2a718c4962"). InnerVolumeSpecName "kube-api-access-4zgkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.254840 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ceph" (OuterVolumeSpecName: "ceph") pod "40fddfb3-2d70-4d01-9da7-9c2a718c4962" (UID: "40fddfb3-2d70-4d01-9da7-9c2a718c4962"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.259680 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "40fddfb3-2d70-4d01-9da7-9c2a718c4962" (UID: "40fddfb3-2d70-4d01-9da7-9c2a718c4962"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:47:28 crc kubenswrapper[4943]: E1129 07:47:28.286519 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key podName:40fddfb3-2d70-4d01-9da7-9c2a718c4962 nodeName:}" failed. No retries permitted until 2025-11-29 07:47:28.786495314 +0000 UTC m=+4423.716584067 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ssh-key" (UniqueName: "kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key") pod "40fddfb3-2d70-4d01-9da7-9c2a718c4962" (UID: "40fddfb3-2d70-4d01-9da7-9c2a718c4962") : error deleting /var/lib/kubelet/pods/40fddfb3-2d70-4d01-9da7-9c2a718c4962/volume-subpaths: remove /var/lib/kubelet/pods/40fddfb3-2d70-4d01-9da7-9c2a718c4962/volume-subpaths: no such file or directory Nov 29 07:47:28 crc kubenswrapper[4943]: E1129 07:47:28.286553 4943 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory podName:40fddfb3-2d70-4d01-9da7-9c2a718c4962 nodeName:}" failed. No retries permitted until 2025-11-29 07:47:28.786545075 +0000 UTC m=+4423.716633828 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "inventory" (UniqueName: "kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory") pod "40fddfb3-2d70-4d01-9da7-9c2a718c4962" (UID: "40fddfb3-2d70-4d01-9da7-9c2a718c4962") : error deleting /var/lib/kubelet/pods/40fddfb3-2d70-4d01-9da7-9c2a718c4962/volume-subpaths: remove /var/lib/kubelet/pods/40fddfb3-2d70-4d01-9da7-9c2a718c4962/volume-subpaths: no such file or directory Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.288928 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "40fddfb3-2d70-4d01-9da7-9c2a718c4962" (UID: "40fddfb3-2d70-4d01-9da7-9c2a718c4962"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.334174 4943 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.334201 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.334211 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zgkb\" (UniqueName: \"kubernetes.io/projected/40fddfb3-2d70-4d01-9da7-9c2a718c4962-kube-api-access-4zgkb\") on node \"crc\" DevicePath \"\"" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.334221 4943 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.605169 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" event={"ID":"40fddfb3-2d70-4d01-9da7-9c2a718c4962","Type":"ContainerDied","Data":"28e988ba59ee572879f1e12086d94062d6d479e793139e1964ba1d3d326e70ed"} Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.605404 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28e988ba59ee572879f1e12086d94062d6d479e793139e1964ba1d3d326e70ed" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.605503 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-42f5w" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.698986 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw"] Nov 29 07:47:28 crc kubenswrapper[4943]: E1129 07:47:28.699347 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8359be52-16c0-4133-9f23-634923317b77" containerName="registry-server" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.699363 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8359be52-16c0-4133-9f23-634923317b77" containerName="registry-server" Nov 29 07:47:28 crc kubenswrapper[4943]: E1129 07:47:28.699374 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8359be52-16c0-4133-9f23-634923317b77" containerName="extract-utilities" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.699380 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8359be52-16c0-4133-9f23-634923317b77" containerName="extract-utilities" Nov 29 07:47:28 crc kubenswrapper[4943]: E1129 07:47:28.699390 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8359be52-16c0-4133-9f23-634923317b77" containerName="extract-content" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.699396 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="8359be52-16c0-4133-9f23-634923317b77" containerName="extract-content" Nov 29 07:47:28 crc kubenswrapper[4943]: E1129 07:47:28.699408 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40fddfb3-2d70-4d01-9da7-9c2a718c4962" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.699415 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="40fddfb3-2d70-4d01-9da7-9c2a718c4962" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.699653 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="40fddfb3-2d70-4d01-9da7-9c2a718c4962" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.699676 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="8359be52-16c0-4133-9f23-634923317b77" containerName="registry-server" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.700442 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.702725 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.702725 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.703072 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.703453 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.727359 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw"] Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.740956 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741014 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741044 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741107 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741179 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741212 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741274 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741319 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741374 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5trg\" (UniqueName: \"kubernetes.io/projected/daf70ffe-3569-4332-8140-3cbaaa3d8db9-kube-api-access-f5trg\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741418 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.741471 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842291 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key\") pod \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842443 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory\") pod \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\" (UID: \"40fddfb3-2d70-4d01-9da7-9c2a718c4962\") " Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842674 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: 
\"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842710 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842757 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842786 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842819 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5trg\" (UniqueName: \"kubernetes.io/projected/daf70ffe-3569-4332-8140-3cbaaa3d8db9-kube-api-access-f5trg\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842848 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842880 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842903 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842922 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842942 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.842965 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.843742 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.848382 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.848477 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory" (OuterVolumeSpecName: "inventory") pod "40fddfb3-2d70-4d01-9da7-9c2a718c4962" (UID: "40fddfb3-2d70-4d01-9da7-9c2a718c4962"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.850447 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "40fddfb3-2d70-4d01-9da7-9c2a718c4962" (UID: "40fddfb3-2d70-4d01-9da7-9c2a718c4962"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.855336 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.855386 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.855741 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.855771 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.855810 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.856451 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.861377 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.866234 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: 
\"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.867375 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5trg\" (UniqueName: \"kubernetes.io/projected/daf70ffe-3569-4332-8140-3cbaaa3d8db9-kube-api-access-f5trg\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.944653 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:47:28 crc kubenswrapper[4943]: I1129 07:47:28.944715 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40fddfb3-2d70-4d01-9da7-9c2a718c4962-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:47:29 crc kubenswrapper[4943]: I1129 07:47:29.017251 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:47:29 crc kubenswrapper[4943]: I1129 07:47:29.566844 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw"] Nov 29 07:47:29 crc kubenswrapper[4943]: I1129 07:47:29.613068 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" event={"ID":"daf70ffe-3569-4332-8140-3cbaaa3d8db9","Type":"ContainerStarted","Data":"2ce44b7d97a83809ace8604fd19caf8a079fddffe7651c07303653597dcf7732"} Nov 29 07:47:31 crc kubenswrapper[4943]: I1129 07:47:31.632235 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" event={"ID":"daf70ffe-3569-4332-8140-3cbaaa3d8db9","Type":"ContainerStarted","Data":"58146c6ba14c95fa4e5b09392862399240351ae43d526a5a8fa4fa23de821b89"} Nov 29 07:47:31 crc kubenswrapper[4943]: I1129 07:47:31.658433 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" podStartSLOduration=2.567144618 podStartE2EDuration="3.65837963s" podCreationTimestamp="2025-11-29 07:47:28 +0000 UTC" firstStartedPulling="2025-11-29 07:47:29.557109826 +0000 UTC m=+4424.487198589" lastFinishedPulling="2025-11-29 07:47:30.648344848 +0000 UTC m=+4425.578433601" observedRunningTime="2025-11-29 07:47:31.650674662 +0000 UTC m=+4426.580763425" watchObservedRunningTime="2025-11-29 07:47:31.65837963 +0000 UTC m=+4426.588468383" Nov 29 07:48:29 crc kubenswrapper[4943]: I1129 07:48:29.783364 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8c68n"] Nov 29 07:48:29 crc kubenswrapper[4943]: I1129 07:48:29.786327 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:29 crc kubenswrapper[4943]: I1129 07:48:29.803511 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8c68n"] Nov 29 07:48:29 crc kubenswrapper[4943]: I1129 07:48:29.928119 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-catalog-content\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:29 crc kubenswrapper[4943]: I1129 07:48:29.928220 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhvg8\" (UniqueName: \"kubernetes.io/projected/20d8fbba-7173-441c-a2ad-7bfa0589bf77-kube-api-access-nhvg8\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:29 crc kubenswrapper[4943]: I1129 07:48:29.928411 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-utilities\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:30 crc kubenswrapper[4943]: I1129 07:48:30.030413 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-utilities\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:30 crc kubenswrapper[4943]: I1129 07:48:30.030482 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-catalog-content\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:30 crc kubenswrapper[4943]: I1129 07:48:30.030536 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhvg8\" (UniqueName: \"kubernetes.io/projected/20d8fbba-7173-441c-a2ad-7bfa0589bf77-kube-api-access-nhvg8\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:30 crc kubenswrapper[4943]: I1129 07:48:30.031176 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-utilities\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:30 crc kubenswrapper[4943]: I1129 07:48:30.031234 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-catalog-content\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:30 crc kubenswrapper[4943]: I1129 07:48:30.056625 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nhvg8\" (UniqueName: \"kubernetes.io/projected/20d8fbba-7173-441c-a2ad-7bfa0589bf77-kube-api-access-nhvg8\") pod \"certified-operators-8c68n\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:30 crc kubenswrapper[4943]: I1129 07:48:30.118594 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:30 crc kubenswrapper[4943]: I1129 07:48:30.386546 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8c68n"] Nov 29 07:48:31 crc kubenswrapper[4943]: I1129 07:48:31.212942 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c68n" event={"ID":"20d8fbba-7173-441c-a2ad-7bfa0589bf77","Type":"ContainerStarted","Data":"13612a30aefdcedd01533a871a62f10253cf1142ca17f0d841a8dfff8cc8e266"} Nov 29 07:48:32 crc kubenswrapper[4943]: I1129 07:48:32.229378 4943 generic.go:334] "Generic (PLEG): container finished" podID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerID="0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca" exitCode=0 Nov 29 07:48:32 crc kubenswrapper[4943]: I1129 07:48:32.229474 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c68n" event={"ID":"20d8fbba-7173-441c-a2ad-7bfa0589bf77","Type":"ContainerDied","Data":"0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca"} Nov 29 07:48:32 crc kubenswrapper[4943]: I1129 07:48:32.233204 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:48:32 crc kubenswrapper[4943]: I1129 07:48:32.612939 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:48:32 crc kubenswrapper[4943]: I1129 07:48:32.613439 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:48:33 crc kubenswrapper[4943]: I1129 07:48:33.241824 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c68n" event={"ID":"20d8fbba-7173-441c-a2ad-7bfa0589bf77","Type":"ContainerStarted","Data":"bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d"} Nov 29 07:48:34 crc kubenswrapper[4943]: I1129 07:48:34.253049 4943 generic.go:334] "Generic (PLEG): container finished" podID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerID="bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d" exitCode=0 Nov 29 07:48:34 crc kubenswrapper[4943]: I1129 07:48:34.253138 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c68n" event={"ID":"20d8fbba-7173-441c-a2ad-7bfa0589bf77","Type":"ContainerDied","Data":"bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d"} Nov 29 07:48:35 crc kubenswrapper[4943]: I1129 07:48:35.267327 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-8c68n" event={"ID":"20d8fbba-7173-441c-a2ad-7bfa0589bf77","Type":"ContainerStarted","Data":"a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259"} Nov 29 07:48:35 crc kubenswrapper[4943]: I1129 07:48:35.286879 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8c68n" podStartSLOduration=3.810062829 podStartE2EDuration="6.286852573s" podCreationTimestamp="2025-11-29 07:48:29 +0000 UTC" firstStartedPulling="2025-11-29 07:48:32.232536746 +0000 UTC m=+4487.162625539" lastFinishedPulling="2025-11-29 07:48:34.70932649 +0000 UTC m=+4489.639415283" observedRunningTime="2025-11-29 07:48:35.284832964 +0000 UTC m=+4490.214921767" watchObservedRunningTime="2025-11-29 07:48:35.286852573 +0000 UTC m=+4490.216941356" Nov 29 07:48:40 crc kubenswrapper[4943]: I1129 07:48:40.119730 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:40 crc kubenswrapper[4943]: I1129 07:48:40.120896 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:40 crc kubenswrapper[4943]: I1129 07:48:40.188328 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:40 crc kubenswrapper[4943]: I1129 07:48:40.365638 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:40 crc kubenswrapper[4943]: I1129 07:48:40.439764 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8c68n"] Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.335360 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8c68n" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerName="registry-server" containerID="cri-o://a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259" gracePeriod=2 Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.811052 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.892796 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-utilities\") pod \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.893001 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhvg8\" (UniqueName: \"kubernetes.io/projected/20d8fbba-7173-441c-a2ad-7bfa0589bf77-kube-api-access-nhvg8\") pod \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.893068 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-catalog-content\") pod \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\" (UID: \"20d8fbba-7173-441c-a2ad-7bfa0589bf77\") " Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.893741 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-utilities" (OuterVolumeSpecName: "utilities") pod "20d8fbba-7173-441c-a2ad-7bfa0589bf77" (UID: "20d8fbba-7173-441c-a2ad-7bfa0589bf77"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.900926 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20d8fbba-7173-441c-a2ad-7bfa0589bf77-kube-api-access-nhvg8" (OuterVolumeSpecName: "kube-api-access-nhvg8") pod "20d8fbba-7173-441c-a2ad-7bfa0589bf77" (UID: "20d8fbba-7173-441c-a2ad-7bfa0589bf77"). InnerVolumeSpecName "kube-api-access-nhvg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.945258 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20d8fbba-7173-441c-a2ad-7bfa0589bf77" (UID: "20d8fbba-7173-441c-a2ad-7bfa0589bf77"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.995675 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhvg8\" (UniqueName: \"kubernetes.io/projected/20d8fbba-7173-441c-a2ad-7bfa0589bf77-kube-api-access-nhvg8\") on node \"crc\" DevicePath \"\"" Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.995711 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:48:42 crc kubenswrapper[4943]: I1129 07:48:42.995721 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d8fbba-7173-441c-a2ad-7bfa0589bf77-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.343310 4943 generic.go:334] "Generic (PLEG): container finished" podID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerID="a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259" exitCode=0 Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.343349 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c68n" event={"ID":"20d8fbba-7173-441c-a2ad-7bfa0589bf77","Type":"ContainerDied","Data":"a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259"} Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.343383 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c68n" event={"ID":"20d8fbba-7173-441c-a2ad-7bfa0589bf77","Type":"ContainerDied","Data":"13612a30aefdcedd01533a871a62f10253cf1142ca17f0d841a8dfff8cc8e266"} Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.343403 4943 scope.go:117] "RemoveContainer" containerID="a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.343410 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8c68n" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.377513 4943 scope.go:117] "RemoveContainer" containerID="bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.406138 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8c68n"] Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.407998 4943 scope.go:117] "RemoveContainer" containerID="0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.415817 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8c68n"] Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.458036 4943 scope.go:117] "RemoveContainer" containerID="a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259" Nov 29 07:48:43 crc kubenswrapper[4943]: E1129 07:48:43.458377 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259\": container with ID starting with a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259 not found: ID does not exist" containerID="a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.458416 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259"} err="failed to get container status \"a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259\": rpc error: code = NotFound desc = could not find container \"a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259\": container with ID starting with a469316ba9df0fc8936cf7fd3e2fb429f870ccb30d508c0eae3587ed497dc259 not found: ID does not exist" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.458443 4943 scope.go:117] "RemoveContainer" containerID="bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d" Nov 29 07:48:43 crc kubenswrapper[4943]: E1129 07:48:43.458701 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d\": container with ID starting with bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d not found: ID does not exist" containerID="bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.458728 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d"} err="failed to get container status \"bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d\": rpc error: code = NotFound desc = could not find container \"bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d\": container with ID starting with bbf9801ca616a26fa655a54e281cbf5533eb46519c3c34b86cc170933f39520d not found: ID does not exist" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.458741 4943 scope.go:117] "RemoveContainer" containerID="0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca" Nov 29 07:48:43 crc kubenswrapper[4943]: E1129 07:48:43.458929 4943 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca\": container with ID starting with 0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca not found: ID does not exist" containerID="0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca" Nov 29 07:48:43 crc kubenswrapper[4943]: I1129 07:48:43.458997 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca"} err="failed to get container status \"0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca\": rpc error: code = NotFound desc = could not find container \"0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca\": container with ID starting with 0c89807a727be15b81cf289378a58ef995ce99463aa151a131e0ea0c9de162ca not found: ID does not exist" Nov 29 07:48:45 crc kubenswrapper[4943]: I1129 07:48:45.347776 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" path="/var/lib/kubelet/pods/20d8fbba-7173-441c-a2ad-7bfa0589bf77/volumes" Nov 29 07:49:02 crc kubenswrapper[4943]: I1129 07:49:02.613299 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:49:02 crc kubenswrapper[4943]: I1129 07:49:02.613878 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.613383 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.616780 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.616849 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.617639 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.617708 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" 
podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" gracePeriod=600 Nov 29 07:49:32 crc kubenswrapper[4943]: E1129 07:49:32.756251 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.840873 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" exitCode=0 Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.840934 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4"} Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.840990 4943 scope.go:117] "RemoveContainer" containerID="0932f3487aa207682e80c231bc7cf33b98e5304d94ede4b5eb1707bda3ae329d" Nov 29 07:49:32 crc kubenswrapper[4943]: I1129 07:49:32.841828 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:49:32 crc kubenswrapper[4943]: E1129 07:49:32.842090 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:49:47 crc kubenswrapper[4943]: I1129 07:49:47.328448 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:49:47 crc kubenswrapper[4943]: E1129 07:49:47.329136 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:50:00 crc kubenswrapper[4943]: I1129 07:50:00.327980 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:50:00 crc kubenswrapper[4943]: E1129 07:50:00.328731 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:50:12 crc kubenswrapper[4943]: I1129 07:50:12.327826 4943 scope.go:117] 
"RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:50:12 crc kubenswrapper[4943]: E1129 07:50:12.328614 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:50:24 crc kubenswrapper[4943]: I1129 07:50:24.327898 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:50:24 crc kubenswrapper[4943]: E1129 07:50:24.329298 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:50:39 crc kubenswrapper[4943]: I1129 07:50:39.327218 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:50:39 crc kubenswrapper[4943]: E1129 07:50:39.328016 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:50:53 crc kubenswrapper[4943]: I1129 07:50:53.328115 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:50:53 crc kubenswrapper[4943]: E1129 07:50:53.329030 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:51:07 crc kubenswrapper[4943]: I1129 07:51:07.328001 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:51:07 crc kubenswrapper[4943]: E1129 07:51:07.329003 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:51:15 crc kubenswrapper[4943]: I1129 07:51:15.914643 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" 
event={"ID":"daf70ffe-3569-4332-8140-3cbaaa3d8db9","Type":"ContainerDied","Data":"58146c6ba14c95fa4e5b09392862399240351ae43d526a5a8fa4fa23de821b89"} Nov 29 07:51:15 crc kubenswrapper[4943]: I1129 07:51:15.914698 4943 generic.go:334] "Generic (PLEG): container finished" podID="daf70ffe-3569-4332-8140-3cbaaa3d8db9" containerID="58146c6ba14c95fa4e5b09392862399240351ae43d526a5a8fa4fa23de821b89" exitCode=0 Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.368791 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.460828 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5trg\" (UniqueName: \"kubernetes.io/projected/daf70ffe-3569-4332-8140-3cbaaa3d8db9-kube-api-access-f5trg\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.460882 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-extra-config-0\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.460943 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-inventory\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.460972 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-1\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.461017 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-0\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.461038 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph-nova-0\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.461059 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ssh-key\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.461111 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-1\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 
07:51:17.461136 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-custom-ceph-combined-ca-bundle\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.461185 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.461261 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-0\") pod \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\" (UID: \"daf70ffe-3569-4332-8140-3cbaaa3d8db9\") " Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.468071 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.468350 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daf70ffe-3569-4332-8140-3cbaaa3d8db9-kube-api-access-f5trg" (OuterVolumeSpecName: "kube-api-access-f5trg") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "kube-api-access-f5trg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.479856 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph" (OuterVolumeSpecName: "ceph") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.487891 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.490895 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "ceph-nova-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.491671 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.492148 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.492310 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-inventory" (OuterVolumeSpecName: "inventory") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.494836 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.494934 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.495898 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "daf70ffe-3569-4332-8140-3cbaaa3d8db9" (UID: "daf70ffe-3569-4332-8140-3cbaaa3d8db9"). InnerVolumeSpecName "nova-cell1-compute-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.563860 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5trg\" (UniqueName: \"kubernetes.io/projected/daf70ffe-3569-4332-8140-3cbaaa3d8db9-kube-api-access-f5trg\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.563895 4943 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.563905 4943 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.563915 4943 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.563926 4943 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.563935 4943 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.564256 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.564288 4943 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.564321 4943 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.564333 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.564342 4943 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/daf70ffe-3569-4332-8140-3cbaaa3d8db9-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.940783 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" event={"ID":"daf70ffe-3569-4332-8140-3cbaaa3d8db9","Type":"ContainerDied","Data":"2ce44b7d97a83809ace8604fd19caf8a079fddffe7651c07303653597dcf7732"} Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.940831 4943 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="2ce44b7d97a83809ace8604fd19caf8a079fddffe7651c07303653597dcf7732" Nov 29 07:51:17 crc kubenswrapper[4943]: I1129 07:51:17.940859 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw" Nov 29 07:51:19 crc kubenswrapper[4943]: I1129 07:51:19.327930 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:51:19 crc kubenswrapper[4943]: E1129 07:51:19.328613 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.328073 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:51:32 crc kubenswrapper[4943]: E1129 07:51:32.329009 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.693448 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 29 07:51:32 crc kubenswrapper[4943]: E1129 07:51:32.694043 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerName="registry-server" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.694068 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerName="registry-server" Nov 29 07:51:32 crc kubenswrapper[4943]: E1129 07:51:32.694094 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerName="extract-content" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.694103 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerName="extract-content" Nov 29 07:51:32 crc kubenswrapper[4943]: E1129 07:51:32.694121 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf70ffe-3569-4332-8140-3cbaaa3d8db9" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.694131 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="daf70ffe-3569-4332-8140-3cbaaa3d8db9" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 29 07:51:32 crc kubenswrapper[4943]: E1129 07:51:32.694152 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerName="extract-utilities" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.694160 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerName="extract-utilities" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.694405 4943 
memory_manager.go:354] "RemoveStaleState removing state" podUID="20d8fbba-7173-441c-a2ad-7bfa0589bf77" containerName="registry-server" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.694435 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="daf70ffe-3569-4332-8140-3cbaaa3d8db9" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.695656 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.699199 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.705476 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.706043 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.732954 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733057 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733095 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733133 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpfgw\" (UniqueName: \"kubernetes.io/projected/d77425c2-2838-4b1e-9847-c36e8228920e-kube-api-access-mpfgw\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733157 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733180 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733229 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733254 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733282 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-run\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733340 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733370 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733392 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733418 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733438 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733459 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.733485 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d77425c2-2838-4b1e-9847-c36e8228920e-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.789393 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.791207 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.793850 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.805532 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835487 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835531 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-dev\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835552 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835607 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-config-data\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835626 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835645 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835669 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-run\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835699 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835727 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-ceph\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835747 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835767 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835791 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835807 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835826 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835844 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835863 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835878 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-iscsi\") pod 
\"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835895 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835912 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d77425c2-2838-4b1e-9847-c36e8228920e-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835934 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835960 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-sys\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.835985 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836001 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-run\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836023 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836042 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836067 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836093 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836114 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-lib-modules\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836136 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836150 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7njf2\" (UniqueName: \"kubernetes.io/projected/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-kube-api-access-7njf2\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836170 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpfgw\" (UniqueName: \"kubernetes.io/projected/d77425c2-2838-4b1e-9847-c36e8228920e-kube-api-access-mpfgw\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836186 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-scripts\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836471 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.836556 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.838635 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-run\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.838687 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-lib-modules\") pod \"cinder-volume-volume1-0\" 
(UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.838711 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.838729 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.838750 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.838797 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.839048 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.839070 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d77425c2-2838-4b1e-9847-c36e8228920e-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.842078 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d77425c2-2838-4b1e-9847-c36e8228920e-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.842089 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.842703 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.844287 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.876504 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpfgw\" (UniqueName: \"kubernetes.io/projected/d77425c2-2838-4b1e-9847-c36e8228920e-kube-api-access-mpfgw\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.946257 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d77425c2-2838-4b1e-9847-c36e8228920e-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d77425c2-2838-4b1e-9847-c36e8228920e\") " pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.947612 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.947728 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-ceph\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.947837 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.947922 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948010 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948102 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948193 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-sys\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948272 4943 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948342 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-run\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948428 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948530 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-lib-modules\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948633 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948704 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7njf2\" (UniqueName: \"kubernetes.io/projected/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-kube-api-access-7njf2\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948768 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-scripts\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948847 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-dev\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.948977 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-config-data\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.949715 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.954644 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.954695 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.954728 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.954748 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-run\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.954868 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.954890 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-lib-modules\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.954966 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-sys\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.955657 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.955776 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-dev\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.961397 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-ceph\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.961862 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.962816 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-config-data\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.963465 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:32 crc kubenswrapper[4943]: I1129 07:51:32.967150 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-scripts\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.005227 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7njf2\" (UniqueName: \"kubernetes.io/projected/9bda2d0c-0a79-463c-a457-1dbaf300a6f9-kube-api-access-7njf2\") pod \"cinder-backup-0\" (UID: \"9bda2d0c-0a79-463c-a457-1dbaf300a6f9\") " pod="openstack/cinder-backup-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.055862 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.112377 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.495504 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-s7pc5"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.497724 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.514855 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-s7pc5"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.568874 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.570641 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.576534 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7bfx\" (UniqueName: \"kubernetes.io/projected/e579c42e-87cb-4c76-9a44-9dcafb37f721-kube-api-access-s7bfx\") pod \"manila-db-create-s7pc5\" (UID: \"e579c42e-87cb-4c76-9a44-9dcafb37f721\") " pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.576637 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e579c42e-87cb-4c76-9a44-9dcafb37f721-operator-scripts\") pod \"manila-db-create-s7pc5\" (UID: \"e579c42e-87cb-4c76-9a44-9dcafb37f721\") " pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.578024 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.578230 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.579349 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.582648 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-b898-account-create-update-qn2l8"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.583894 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.586624 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-kjww4" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.587229 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.598629 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-b898-account-create-update-qn2l8"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.618349 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.653618 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.655449 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.657499 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.657836 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.677801 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-scripts\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.677843 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.677916 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-logs\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.677968 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678029 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rst2g\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-kube-api-access-rst2g\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678050 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkmm6\" (UniqueName: \"kubernetes.io/projected/3a74bceb-8035-40a6-9a1f-99e60c2f4108-kube-api-access-lkmm6\") pod \"manila-b898-account-create-update-qn2l8\" (UID: \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\") " pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678204 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7bfx\" (UniqueName: \"kubernetes.io/projected/e579c42e-87cb-4c76-9a44-9dcafb37f721-kube-api-access-s7bfx\") pod \"manila-db-create-s7pc5\" (UID: \"e579c42e-87cb-4c76-9a44-9dcafb37f721\") " pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678237 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a74bceb-8035-40a6-9a1f-99e60c2f4108-operator-scripts\") pod 
\"manila-b898-account-create-update-qn2l8\" (UID: \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\") " pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678355 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678403 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-ceph\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678440 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e579c42e-87cb-4c76-9a44-9dcafb37f721-operator-scripts\") pod \"manila-db-create-s7pc5\" (UID: \"e579c42e-87cb-4c76-9a44-9dcafb37f721\") " pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678458 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-config-data\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.678537 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.680751 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e579c42e-87cb-4c76-9a44-9dcafb37f721-operator-scripts\") pod \"manila-db-create-s7pc5\" (UID: \"e579c42e-87cb-4c76-9a44-9dcafb37f721\") " pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.686624 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7bfc5c9977-ggjqb"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.688149 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.696780 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.697048 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.697211 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.697930 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-phxlt" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.715648 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7bfx\" (UniqueName: \"kubernetes.io/projected/e579c42e-87cb-4c76-9a44-9dcafb37f721-kube-api-access-s7bfx\") pod \"manila-db-create-s7pc5\" (UID: \"e579c42e-87cb-4c76-9a44-9dcafb37f721\") " pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.719694 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.731727 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7bfc5c9977-ggjqb"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.748394 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:33 crc kubenswrapper[4943]: E1129 07:51:33.749208 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run kube-api-access-rst2g logs public-tls-certs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-external-api-0" podUID="a38d5405-aba7-4164-b6de-33d61bbfd574" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.779900 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7b8cb6d54f-fqmgs"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.781380 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.782870 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.782893 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-logs\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.782921 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.782957 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rst2g\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-kube-api-access-rst2g\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.782982 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkmm6\" (UniqueName: \"kubernetes.io/projected/3a74bceb-8035-40a6-9a1f-99e60c2f4108-kube-api-access-lkmm6\") pod \"manila-b898-account-create-update-qn2l8\" (UID: \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\") " pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783020 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9hxl\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-kube-api-access-f9hxl\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783040 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783071 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a74bceb-8035-40a6-9a1f-99e60c2f4108-operator-scripts\") pod \"manila-b898-account-create-update-qn2l8\" (UID: \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\") " pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783090 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783129 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783145 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-logs\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783164 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783182 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783206 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-ceph\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783222 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783803 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-logs\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.785280 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a74bceb-8035-40a6-9a1f-99e60c2f4108-operator-scripts\") pod \"manila-b898-account-create-update-qn2l8\" (UID: \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\") " pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.783228 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-config-data\") pod 
\"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.786800 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-config-data\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.786889 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.786915 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-logs\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.786940 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.786967 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.787004 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqgf6\" (UniqueName: \"kubernetes.io/projected/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-kube-api-access-lqgf6\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.787050 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.787083 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-scripts\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.787114 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-horizon-secret-key\") pod 
\"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.787163 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-scripts\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.787648 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.794810 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-config-data\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.798777 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 07:51:33 crc kubenswrapper[4943]: E1129 07:51:33.799479 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run internal-tls-certs kube-api-access-f9hxl logs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-internal-api-0" podUID="d1630246-a3b2-4bf0-997a-fb0a3518be62" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.799722 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-ceph\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.801423 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.801934 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-scripts\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.802131 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.808277 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7b8cb6d54f-fqmgs"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.813199 4943 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkmm6\" (UniqueName: \"kubernetes.io/projected/3a74bceb-8035-40a6-9a1f-99e60c2f4108-kube-api-access-lkmm6\") pod \"manila-b898-account-create-update-qn2l8\" (UID: \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\") " pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.813342 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rst2g\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-kube-api-access-rst2g\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.829932 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.848549 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.870048 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.891496 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-config-data\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892018 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892103 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-logs\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892183 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892294 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-logs\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892380 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-config-data\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892446 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lftgl\" (UniqueName: \"kubernetes.io/projected/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-kube-api-access-lftgl\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892540 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892660 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-logs\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892744 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892820 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892899 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqgf6\" (UniqueName: \"kubernetes.io/projected/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-kube-api-access-lqgf6\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892968 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-scripts\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.893042 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-scripts\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.893122 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-horizon-secret-key\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: 
\"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.893212 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-horizon-secret-key\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.893319 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9hxl\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-kube-api-access-f9hxl\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.893396 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.893481 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.892772 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-logs\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.896809 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.897405 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-logs\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.897440 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.897702 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-scripts\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc 
kubenswrapper[4943]: I1129 07:51:33.898343 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-config-data\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.902955 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.903278 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.911224 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.913239 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-horizon-secret-key\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.915302 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9hxl\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-kube-api-access-f9hxl\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.916215 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqgf6\" (UniqueName: \"kubernetes.io/projected/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-kube-api-access-lqgf6\") pod \"horizon-7bfc5c9977-ggjqb\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") " pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.919040 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.920511 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:33 crc kubenswrapper[4943]: I1129 07:51:33.920525 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " 
pod="openstack/glance-default-internal-api-0" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.003006 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-scripts\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.003074 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-horizon-secret-key\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.003165 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-config-data\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.003195 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-logs\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.003221 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lftgl\" (UniqueName: \"kubernetes.io/projected/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-kube-api-access-lftgl\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.004503 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-scripts\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.004783 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-logs\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.005380 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-config-data\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.011109 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-horizon-secret-key\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.020688 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lftgl\" 
(UniqueName: \"kubernetes.io/projected/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-kube-api-access-lftgl\") pod \"horizon-7b8cb6d54f-fqmgs\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") " pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.068345 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bfc5c9977-ggjqb" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.123486 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7b8cb6d54f-fqmgs" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.133325 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.133350 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9bda2d0c-0a79-463c-a457-1dbaf300a6f9","Type":"ContainerStarted","Data":"22ca230dee616fb3859b1266d971849eb7a077d2a543a7da4a5c2f96de848e2c"} Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.133766 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.365394 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.437906 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.447297 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.454101 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-s7pc5"] Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.515986 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516155 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-combined-ca-bundle\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516197 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-internal-tls-certs\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516216 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-combined-ca-bundle\") pod \"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516268 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-config-data\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516315 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-logs\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516364 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-ceph\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516396 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-config-data\") pod \"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516497 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516522 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-logs\") pod 
\"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516591 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-public-tls-certs\") pod \"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516671 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-scripts\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516695 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rst2g\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-kube-api-access-rst2g\") pod \"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516716 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-scripts\") pod \"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516760 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-httpd-run\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516825 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-ceph\") pod \"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516861 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9hxl\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-kube-api-access-f9hxl\") pod \"d1630246-a3b2-4bf0-997a-fb0a3518be62\" (UID: \"d1630246-a3b2-4bf0-997a-fb0a3518be62\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.516929 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-httpd-run\") pod \"a38d5405-aba7-4164-b6de-33d61bbfd574\" (UID: \"a38d5405-aba7-4164-b6de-33d61bbfd574\") " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.517867 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.530628 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-ceph" (OuterVolumeSpecName: "ceph") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.530640 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-scripts" (OuterVolumeSpecName: "scripts") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.530898 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.531182 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.533525 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-kube-api-access-f9hxl" (OuterVolumeSpecName: "kube-api-access-f9hxl") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "kube-api-access-f9hxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.537889 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-config-data" (OuterVolumeSpecName: "config-data") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.537939 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.539262 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.539644 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-logs" (OuterVolumeSpecName: "logs") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.541985 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.546580 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-logs" (OuterVolumeSpecName: "logs") pod "d1630246-a3b2-4bf0-997a-fb0a3518be62" (UID: "d1630246-a3b2-4bf0-997a-fb0a3518be62"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.553503 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-ceph" (OuterVolumeSpecName: "ceph") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.559266 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-config-data" (OuterVolumeSpecName: "config-data") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.564048 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-kube-api-access-rst2g" (OuterVolumeSpecName: "kube-api-access-rst2g") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "kube-api-access-rst2g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.568513 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.594739 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-scripts" (OuterVolumeSpecName: "scripts") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.594837 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a38d5405-aba7-4164-b6de-33d61bbfd574" (UID: "a38d5405-aba7-4164-b6de-33d61bbfd574"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.603369 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619773 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619802 4943 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619813 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619823 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619833 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619842 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619850 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619876 4943 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619885 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619894 4943 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619902 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1630246-a3b2-4bf0-997a-fb0a3518be62-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 
07:51:34.619909 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rst2g\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-kube-api-access-rst2g\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619919 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a38d5405-aba7-4164-b6de-33d61bbfd574-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619926 4943 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1630246-a3b2-4bf0-997a-fb0a3518be62-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619933 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a38d5405-aba7-4164-b6de-33d61bbfd574-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619941 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9hxl\" (UniqueName: \"kubernetes.io/projected/d1630246-a3b2-4bf0-997a-fb0a3518be62-kube-api-access-f9hxl\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619948 4943 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a38d5405-aba7-4164-b6de-33d61bbfd574-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.619961 4943 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.642094 4943 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.677205 4943 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.721945 4943 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:34 crc kubenswrapper[4943]: I1129 07:51:34.722010 4943 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.148438 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"d77425c2-2838-4b1e-9847-c36e8228920e","Type":"ContainerStarted","Data":"5591892f85248d2a82c5db1c21920c7f4316d93d5dc57dff93972054472fe594"} Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.153289 4943 generic.go:334] "Generic (PLEG): container finished" podID="e579c42e-87cb-4c76-9a44-9dcafb37f721" containerID="f19ad05d5a9202ec37d0e1b21e4c59c9e858c45a1fbfd51bce2f9ece2e77ba3d" exitCode=0 Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.153383 4943 util.go:30] "No sandbox for pod can be found. 
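The teardown above runs in a fixed order: UnmountVolume.TearDown per volume, a "Volume detached" record once the reconciler forgets the volume, then UnmountDevice only for the device-backed local PVs (local-storage07-crc, local-storage08-crc). A minimal Go sketch for pulling the detach records out of a log like this one; the kubelet.log path and the klog message text are assumptions taken from the entries above, not a kubelet API:

// List every volume the reconciler reported as detached, in log order.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Matches the reconciler_common.go:293 message as it appears above:
	//   "Volume detached for volume \"NAME\" (UniqueName: \"PLUGIN/PATH\") ..."
	re := regexp.MustCompile(`Volume detached for volume \\"([^"\\]+)\\" \(UniqueName: \\"([^"\\]+)\\"\)`)
	f, err := os.Open("kubelet.log") // assumed path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // journal lines can be long
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("detached %-22s %s\n", m[1], m[2])
		}
	}
}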
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.155031 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-s7pc5" event={"ID":"e579c42e-87cb-4c76-9a44-9dcafb37f721","Type":"ContainerDied","Data":"f19ad05d5a9202ec37d0e1b21e4c59c9e858c45a1fbfd51bce2f9ece2e77ba3d"}
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.155097 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-s7pc5" event={"ID":"e579c42e-87cb-4c76-9a44-9dcafb37f721","Type":"ContainerStarted","Data":"743ae75cd98ccc9523550e4b7f6d4339ab3ee27ce3625497009e368795cb677c"}
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.155168 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.203128 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-b898-account-create-update-qn2l8"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.291922 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7b8cb6d54f-fqmgs"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.300882 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7bfc5c9977-ggjqb"]
Nov 29 07:51:35 crc kubenswrapper[4943]: W1129 07:51:35.302831 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20f0eeca_d432_4c9a_9e82_6cd6527e8d65.slice/crio-e885e6367b3cd5783763ad9db390101c0141ab3f8cc17ac55b737b059e4c16f9 WatchSource:0}: Error finding container e885e6367b3cd5783763ad9db390101c0141ab3f8cc17ac55b737b059e4c16f9: Status 404 returned error can't find the container with id e885e6367b3cd5783763ad9db390101c0141ab3f8cc17ac55b737b059e4c16f9
Nov 29 07:51:35 crc kubenswrapper[4943]: W1129 07:51:35.305146 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a57a9eb_7066_4bba_a3d4_b9b235dd5fd0.slice/crio-467a7ff4cdadcb619e32aee2bee955c9be4f5a1b0d3923ade2b635c25cc11326 WatchSource:0}: Error finding container 467a7ff4cdadcb619e32aee2bee955c9be4f5a1b0d3923ade2b635c25cc11326: Status 404 returned error can't find the container with id 467a7ff4cdadcb619e32aee2bee955c9be4f5a1b0d3923ade2b635c25cc11326
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.359634 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.375247 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.384350 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.385793 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.392158 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.392453 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-kjww4"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.392608 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.392828 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.401693 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.409731 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.430662 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.437888 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.437973 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-scripts\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.438052 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-ceph\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.438071 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.438092 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlrdg\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-kube-api-access-zlrdg\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.438112 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.438159 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-logs\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.438189 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.438216 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-config-data\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.448325 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.449837 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.455187 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.455261 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.455645 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539326 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-logs\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539647 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539677 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539723 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-ceph\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539750 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539772 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlrdg\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-kube-api-access-zlrdg\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539790 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539814 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539839 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-logs\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539865 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539891 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539914 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-config-data\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0"
Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539940 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z45c\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-kube-api-access-8z45c\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0"
\"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-kube-api-access-8z45c\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.539969 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.540006 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.540036 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.540067 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.540119 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-scripts\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.541553 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-logs\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.541692 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.542325 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.547017 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.547742 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.548241 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-config-data\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.548353 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-scripts\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.552826 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-ceph\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.568014 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlrdg\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-kube-api-access-zlrdg\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.627277 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642607 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642686 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642711 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: 
\"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642752 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-logs\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642771 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642788 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642832 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642873 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.642891 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z45c\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-kube-api-access-8z45c\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.643069 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.643223 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.646039 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-logs\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc 
kubenswrapper[4943]: I1129 07:51:35.657242 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.659187 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.659812 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.686320 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z45c\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-kube-api-access-8z45c\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.687069 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.693479 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.695759 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " pod="openstack/glance-default-internal-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.734071 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 07:51:35 crc kubenswrapper[4943]: I1129 07:51:35.786039 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.043279 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7b8cb6d54f-fqmgs"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.101479 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5897fd8cd4-cfjb4"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.115679 4943 util.go:30] "No sandbox for pod can be found. 
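The mount sequence above shows the reconciler's phase ordering: VerifyControllerAttachedVolume for every volume, MountVolume.MountDevice only for the device-backed local PVs (device mount paths /mnt/openstack/pv07 and /mnt/openstack/pv08), then MountVolume.SetUp for everything, secrets and empty-dirs included. A simplified sketch of that ordering, illustrative only and not kubelet's operation executor:

package main

import "fmt"

// phasesFor returns the mount phases a volume passes through, as implied by
// the entries above: only device-backed volumes need the MountDevice step.
func phasesFor(deviceBacked bool) []string {
	phases := []string{"VerifyControllerAttachedVolume"}
	if deviceBacked {
		phases = append(phases, "MountVolume.MountDevice") // e.g. /mnt/openstack/pv07
	}
	return append(phases, "MountVolume.SetUp")
}

func main() {
	fmt.Println(phasesFor(true))  // local-storage07-crc, local-storage08-crc
	fmt.Println(phasesFor(false)) // secret, configmap, projected, empty-dir
}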
Need to start a new one" pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.117981 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.182239 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-combined-ca-bundle\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.182357 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rktlj\" (UniqueName: \"kubernetes.io/projected/2401492d-b549-43d0-988e-4d1235af15cf-kube-api-access-rktlj\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.182420 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-config-data\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.182584 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-secret-key\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.182624 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-scripts\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.182664 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2401492d-b549-43d0-988e-4d1235af15cf-logs\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.182806 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-tls-certs\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.185831 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.234284 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5897fd8cd4-cfjb4"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.267780 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bfc5c9977-ggjqb" 
event={"ID":"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0","Type":"ContainerStarted","Data":"467a7ff4cdadcb619e32aee2bee955c9be4f5a1b0d3923ade2b635c25cc11326"} Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.299578 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b8cb6d54f-fqmgs" event={"ID":"20f0eeca-d432-4c9a-9e82-6cd6527e8d65","Type":"ContainerStarted","Data":"e885e6367b3cd5783763ad9db390101c0141ab3f8cc17ac55b737b059e4c16f9"} Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.306752 4943 generic.go:334] "Generic (PLEG): container finished" podID="3a74bceb-8035-40a6-9a1f-99e60c2f4108" containerID="3d2c28e50cd6af020f8464bd70952f5ecb29d03ddeed1e4f4490b8b9a0bca011" exitCode=0 Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.306834 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-b898-account-create-update-qn2l8" event={"ID":"3a74bceb-8035-40a6-9a1f-99e60c2f4108","Type":"ContainerDied","Data":"3d2c28e50cd6af020f8464bd70952f5ecb29d03ddeed1e4f4490b8b9a0bca011"} Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.306868 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-b898-account-create-update-qn2l8" event={"ID":"3a74bceb-8035-40a6-9a1f-99e60c2f4108","Type":"ContainerStarted","Data":"eb1ca0fee2996c1f38dd7f7eaad0d139be96a7ea7c71be6249890f600c9a8af9"} Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.310868 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9bda2d0c-0a79-463c-a457-1dbaf300a6f9","Type":"ContainerStarted","Data":"26f0902b98f8c94ea4e31fb9aa5f8ee599615a7cd145b7ece1c9bf4f125dfa83"} Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.322291 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-combined-ca-bundle\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.341371 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rktlj\" (UniqueName: \"kubernetes.io/projected/2401492d-b549-43d0-988e-4d1235af15cf-kube-api-access-rktlj\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.341462 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-config-data\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.341650 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-secret-key\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.341696 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-scripts\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " 
pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.341752 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2401492d-b549-43d0-988e-4d1235af15cf-logs\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.341933 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-tls-certs\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.340055 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-combined-ca-bundle\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.353142 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-config-data\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.360209 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-secret-key\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.360425 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-scripts\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.361117 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.363783 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2401492d-b549-43d0-988e-4d1235af15cf-logs\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.366215 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-tls-certs\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.370346 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7bfc5c9977-ggjqb"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.377512 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rktlj\" (UniqueName: 
\"kubernetes.io/projected/2401492d-b549-43d0-988e-4d1235af15cf-kube-api-access-rktlj\") pod \"horizon-5897fd8cd4-cfjb4\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.379210 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68cf6db5b8-5ckcr"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.380975 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.389417 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68cf6db5b8-5ckcr"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.419869 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=3.21143208 podStartE2EDuration="4.419847257s" podCreationTimestamp="2025-11-29 07:51:32 +0000 UTC" firstStartedPulling="2025-11-29 07:51:33.863416592 +0000 UTC m=+4668.793505345" lastFinishedPulling="2025-11-29 07:51:35.071831769 +0000 UTC m=+4670.001920522" observedRunningTime="2025-11-29 07:51:36.347659618 +0000 UTC m=+4671.277748391" watchObservedRunningTime="2025-11-29 07:51:36.419847257 +0000 UTC m=+4671.349936000" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.545807 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-horizon-tls-certs\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.546146 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-config-data\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.546179 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-combined-ca-bundle\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.546261 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-scripts\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.546304 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-logs\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.546375 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-horizon-secret-key\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.546505 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8xrr\" (UniqueName: \"kubernetes.io/projected/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-kube-api-access-q8xrr\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.546806 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.648009 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-scripts\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.648076 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-logs\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.648103 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-horizon-secret-key\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.648144 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8xrr\" (UniqueName: \"kubernetes.io/projected/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-kube-api-access-q8xrr\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.648206 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-horizon-tls-certs\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.648266 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-config-data\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.648287 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-combined-ca-bundle\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.650414 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-logs\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.651511 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-config-data\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.651703 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-scripts\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.655629 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-horizon-tls-certs\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.656508 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-horizon-secret-key\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.662205 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-combined-ca-bundle\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.682844 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8xrr\" (UniqueName: \"kubernetes.io/projected/d7af4d3f-647b-437d-8ab3-d8bb4debb25a-kube-api-access-q8xrr\") pod \"horizon-68cf6db5b8-5ckcr\" (UID: \"d7af4d3f-647b-437d-8ab3-d8bb4debb25a\") " pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.714541 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.726835 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:51:36 crc kubenswrapper[4943]: W1129 07:51:36.724348 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod675b86e9_13da_4901_ab2e_c733d069b820.slice/crio-f98edaf1ec805be8a82c5566730f9c378477d3efae487f5539e388c59642c811 WatchSource:0}: Error finding container f98edaf1ec805be8a82c5566730f9c378477d3efae487f5539e388c59642c811: Status 404 returned error can't find the container with id f98edaf1ec805be8a82c5566730f9c378477d3efae487f5539e388c59642c811 Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.755131 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-s7pc5" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.825096 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.856085 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e579c42e-87cb-4c76-9a44-9dcafb37f721-operator-scripts\") pod \"e579c42e-87cb-4c76-9a44-9dcafb37f721\" (UID: \"e579c42e-87cb-4c76-9a44-9dcafb37f721\") " Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.856332 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7bfx\" (UniqueName: \"kubernetes.io/projected/e579c42e-87cb-4c76-9a44-9dcafb37f721-kube-api-access-s7bfx\") pod \"e579c42e-87cb-4c76-9a44-9dcafb37f721\" (UID: \"e579c42e-87cb-4c76-9a44-9dcafb37f721\") " Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.857812 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e579c42e-87cb-4c76-9a44-9dcafb37f721-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e579c42e-87cb-4c76-9a44-9dcafb37f721" (UID: "e579c42e-87cb-4c76-9a44-9dcafb37f721"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.866934 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e579c42e-87cb-4c76-9a44-9dcafb37f721-kube-api-access-s7bfx" (OuterVolumeSpecName: "kube-api-access-s7bfx") pod "e579c42e-87cb-4c76-9a44-9dcafb37f721" (UID: "e579c42e-87cb-4c76-9a44-9dcafb37f721"). InnerVolumeSpecName "kube-api-access-s7bfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.961391 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7bfx\" (UniqueName: \"kubernetes.io/projected/e579c42e-87cb-4c76-9a44-9dcafb37f721-kube-api-access-s7bfx\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:36 crc kubenswrapper[4943]: I1129 07:51:36.961435 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e579c42e-87cb-4c76-9a44-9dcafb37f721-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.144040 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5897fd8cd4-cfjb4"] Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.374593 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=4.049642356 podStartE2EDuration="5.374555402s" podCreationTimestamp="2025-11-29 07:51:32 +0000 UTC" firstStartedPulling="2025-11-29 07:51:34.630031263 +0000 UTC m=+4669.560120016" lastFinishedPulling="2025-11-29 07:51:35.954944319 +0000 UTC m=+4670.885033062" observedRunningTime="2025-11-29 07:51:37.374030949 +0000 UTC m=+4672.304119712" watchObservedRunningTime="2025-11-29 07:51:37.374555402 +0000 UTC m=+4672.304644155" Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.416937 4943 util.go:48] "No ready sandbox for pod can be found. 
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.417194 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a38d5405-aba7-4164-b6de-33d61bbfd574" path="/var/lib/kubelet/pods/a38d5405-aba7-4164-b6de-33d61bbfd574/volumes"
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.417750 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1630246-a3b2-4bf0-997a-fb0a3518be62" path="/var/lib/kubelet/pods/d1630246-a3b2-4bf0-997a-fb0a3518be62/volumes"
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418238 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"675b86e9-13da-4901-ab2e-c733d069b820","Type":"ContainerStarted","Data":"f98edaf1ec805be8a82c5566730f9c378477d3efae487f5539e388c59642c811"}
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418270 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5897fd8cd4-cfjb4" event={"ID":"2401492d-b549-43d0-988e-4d1235af15cf","Type":"ContainerStarted","Data":"b1f6ff46c41feb9f747a9f78bf6ebccba79193d16ed04d03bffc88131d9ed505"}
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418288 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"d77425c2-2838-4b1e-9847-c36e8228920e","Type":"ContainerStarted","Data":"b8970b0353a6bb7b049607afc96490609ce9e0bad64584d3c764ed6bf9eb225b"}
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418300 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68cf6db5b8-5ckcr"]
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418313 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"d77425c2-2838-4b1e-9847-c36e8228920e","Type":"ContainerStarted","Data":"916c91bad2f475f1314dc41284d931bc924dfe469fd6b13191f10b68b4550f09"}
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418323 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d83ad6bc-afc3-4764-afef-fc000084ed65","Type":"ContainerStarted","Data":"2873e88902b957a54570e9d3c35a21a3730da1e363556a08519a3ab1e5031a89"}
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418333 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9bda2d0c-0a79-463c-a457-1dbaf300a6f9","Type":"ContainerStarted","Data":"44b9f114c9196f989c2dbee98c1757ccb0418f08567930cf7bce57f3aea37f1c"}
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418343 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-s7pc5" event={"ID":"e579c42e-87cb-4c76-9a44-9dcafb37f721","Type":"ContainerDied","Data":"743ae75cd98ccc9523550e4b7f6d4339ab3ee27ce3625497009e368795cb677c"}
Nov 29 07:51:37 crc kubenswrapper[4943]: I1129 07:51:37.418354 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="743ae75cd98ccc9523550e4b7f6d4339ab3ee27ce3625497009e368795cb677c"
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.057140 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0"
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.106228 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-b898-account-create-update-qn2l8"
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.115987 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0"
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.201433 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkmm6\" (UniqueName: \"kubernetes.io/projected/3a74bceb-8035-40a6-9a1f-99e60c2f4108-kube-api-access-lkmm6\") pod \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\" (UID: \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\") "
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.201750 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a74bceb-8035-40a6-9a1f-99e60c2f4108-operator-scripts\") pod \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\" (UID: \"3a74bceb-8035-40a6-9a1f-99e60c2f4108\") "
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.202844 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a74bceb-8035-40a6-9a1f-99e60c2f4108-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3a74bceb-8035-40a6-9a1f-99e60c2f4108" (UID: "3a74bceb-8035-40a6-9a1f-99e60c2f4108"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.206094 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a74bceb-8035-40a6-9a1f-99e60c2f4108-kube-api-access-lkmm6" (OuterVolumeSpecName: "kube-api-access-lkmm6") pod "3a74bceb-8035-40a6-9a1f-99e60c2f4108" (UID: "3a74bceb-8035-40a6-9a1f-99e60c2f4108"). InnerVolumeSpecName "kube-api-access-lkmm6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.303504 4943 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a74bceb-8035-40a6-9a1f-99e60c2f4108-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.303537 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkmm6\" (UniqueName: \"kubernetes.io/projected/3a74bceb-8035-40a6-9a1f-99e60c2f4108-kube-api-access-lkmm6\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.429443 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68cf6db5b8-5ckcr" event={"ID":"d7af4d3f-647b-437d-8ab3-d8bb4debb25a","Type":"ContainerStarted","Data":"8b765352b54209b76aabce7e933efd92b44addf1fa2d1630b5ad80d5e640c447"}
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.431381 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d83ad6bc-afc3-4764-afef-fc000084ed65","Type":"ContainerStarted","Data":"3f95c10937bda25904fd2b7d906fc5d2be15ed62dcbc634ba1b2efc20250f734"}
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.435332 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-b898-account-create-update-qn2l8" event={"ID":"3a74bceb-8035-40a6-9a1f-99e60c2f4108","Type":"ContainerDied","Data":"eb1ca0fee2996c1f38dd7f7eaad0d139be96a7ea7c71be6249890f600c9a8af9"}
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.435364 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb1ca0fee2996c1f38dd7f7eaad0d139be96a7ea7c71be6249890f600c9a8af9"
Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.435682 4943 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/manila-b898-account-create-update-qn2l8" Nov 29 07:51:38 crc kubenswrapper[4943]: I1129 07:51:38.437918 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"675b86e9-13da-4901-ab2e-c733d069b820","Type":"ContainerStarted","Data":"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518"} Nov 29 07:51:39 crc kubenswrapper[4943]: I1129 07:51:39.456505 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"675b86e9-13da-4901-ab2e-c733d069b820","Type":"ContainerStarted","Data":"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129"} Nov 29 07:51:39 crc kubenswrapper[4943]: I1129 07:51:39.456894 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="675b86e9-13da-4901-ab2e-c733d069b820" containerName="glance-log" containerID="cri-o://5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518" gracePeriod=30 Nov 29 07:51:39 crc kubenswrapper[4943]: I1129 07:51:39.457137 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="675b86e9-13da-4901-ab2e-c733d069b820" containerName="glance-httpd" containerID="cri-o://ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129" gracePeriod=30 Nov 29 07:51:39 crc kubenswrapper[4943]: I1129 07:51:39.460911 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d83ad6bc-afc3-4764-afef-fc000084ed65","Type":"ContainerStarted","Data":"af1e1d65eae05cc23309d23dfe8251ce53f00b02967f619aa450c62bedd31592"} Nov 29 07:51:39 crc kubenswrapper[4943]: I1129 07:51:39.460987 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerName="glance-log" containerID="cri-o://3f95c10937bda25904fd2b7d906fc5d2be15ed62dcbc634ba1b2efc20250f734" gracePeriod=30 Nov 29 07:51:39 crc kubenswrapper[4943]: I1129 07:51:39.461115 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerName="glance-httpd" containerID="cri-o://af1e1d65eae05cc23309d23dfe8251ce53f00b02967f619aa450c62bedd31592" gracePeriod=30 Nov 29 07:51:39 crc kubenswrapper[4943]: I1129 07:51:39.496750 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.496728475 podStartE2EDuration="4.496728475s" podCreationTimestamp="2025-11-29 07:51:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:51:39.481049622 +0000 UTC m=+4674.411138385" watchObservedRunningTime="2025-11-29 07:51:39.496728475 +0000 UTC m=+4674.426817228" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.261912 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.282524 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.282509623 podStartE2EDuration="5.282509623s" podCreationTimestamp="2025-11-29 07:51:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:51:39.507717742 +0000 UTC m=+4674.437806495" watchObservedRunningTime="2025-11-29 07:51:40.282509623 +0000 UTC m=+4675.212598366" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.349901 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-logs\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.349999 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-ceph\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.350089 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-scripts\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.350524 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-public-tls-certs\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.350640 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-combined-ca-bundle\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.350727 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.350846 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-httpd-run\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.350892 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-config-data\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.351023 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlrdg\" (UniqueName: 
\"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-kube-api-access-zlrdg\") pod \"675b86e9-13da-4901-ab2e-c733d069b820\" (UID: \"675b86e9-13da-4901-ab2e-c733d069b820\") " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.354132 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-logs" (OuterVolumeSpecName: "logs") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.354559 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.356640 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-kube-api-access-zlrdg" (OuterVolumeSpecName: "kube-api-access-zlrdg") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "kube-api-access-zlrdg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.357074 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.357901 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-ceph" (OuterVolumeSpecName: "ceph") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.358104 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-scripts" (OuterVolumeSpecName: "scripts") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.381782 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.406240 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-config-data" (OuterVolumeSpecName: "config-data") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.431820 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "675b86e9-13da-4901-ab2e-c733d069b820" (UID: "675b86e9-13da-4901-ab2e-c733d069b820"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453561 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453623 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453636 4943 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453649 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453672 4943 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453686 4943 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453698 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675b86e9-13da-4901-ab2e-c733d069b820-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453710 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlrdg\" (UniqueName: \"kubernetes.io/projected/675b86e9-13da-4901-ab2e-c733d069b820-kube-api-access-zlrdg\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.453719 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675b86e9-13da-4901-ab2e-c733d069b820-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.473600 4943 generic.go:334] "Generic (PLEG): container finished" podID="675b86e9-13da-4901-ab2e-c733d069b820" containerID="ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129" exitCode=0 Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.473662 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"675b86e9-13da-4901-ab2e-c733d069b820","Type":"ContainerDied","Data":"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129"} Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.473687 4943 generic.go:334] "Generic (PLEG): container finished" 
podID="675b86e9-13da-4901-ab2e-c733d069b820" containerID="5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518" exitCode=143 Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.473703 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"675b86e9-13da-4901-ab2e-c733d069b820","Type":"ContainerDied","Data":"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518"} Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.473715 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"675b86e9-13da-4901-ab2e-c733d069b820","Type":"ContainerDied","Data":"f98edaf1ec805be8a82c5566730f9c378477d3efae487f5539e388c59642c811"} Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.473738 4943 scope.go:117] "RemoveContainer" containerID="ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.473642 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.476736 4943 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.487269 4943 generic.go:334] "Generic (PLEG): container finished" podID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerID="af1e1d65eae05cc23309d23dfe8251ce53f00b02967f619aa450c62bedd31592" exitCode=0 Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.487393 4943 generic.go:334] "Generic (PLEG): container finished" podID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerID="3f95c10937bda25904fd2b7d906fc5d2be15ed62dcbc634ba1b2efc20250f734" exitCode=143 Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.487458 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d83ad6bc-afc3-4764-afef-fc000084ed65","Type":"ContainerDied","Data":"af1e1d65eae05cc23309d23dfe8251ce53f00b02967f619aa450c62bedd31592"} Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.487522 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d83ad6bc-afc3-4764-afef-fc000084ed65","Type":"ContainerDied","Data":"3f95c10937bda25904fd2b7d906fc5d2be15ed62dcbc634ba1b2efc20250f734"} Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.512542 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.518467 4943 scope.go:117] "RemoveContainer" containerID="5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.522758 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.533270 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:40 crc kubenswrapper[4943]: E1129 07:51:40.534048 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a74bceb-8035-40a6-9a1f-99e60c2f4108" containerName="mariadb-account-create-update" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.534239 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a74bceb-8035-40a6-9a1f-99e60c2f4108" 
containerName="mariadb-account-create-update" Nov 29 07:51:40 crc kubenswrapper[4943]: E1129 07:51:40.534305 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="675b86e9-13da-4901-ab2e-c733d069b820" containerName="glance-log" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.534351 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="675b86e9-13da-4901-ab2e-c733d069b820" containerName="glance-log" Nov 29 07:51:40 crc kubenswrapper[4943]: E1129 07:51:40.534413 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e579c42e-87cb-4c76-9a44-9dcafb37f721" containerName="mariadb-database-create" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.534466 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e579c42e-87cb-4c76-9a44-9dcafb37f721" containerName="mariadb-database-create" Nov 29 07:51:40 crc kubenswrapper[4943]: E1129 07:51:40.534520 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="675b86e9-13da-4901-ab2e-c733d069b820" containerName="glance-httpd" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.534581 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="675b86e9-13da-4901-ab2e-c733d069b820" containerName="glance-httpd" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.534831 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="675b86e9-13da-4901-ab2e-c733d069b820" containerName="glance-httpd" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.535328 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="675b86e9-13da-4901-ab2e-c733d069b820" containerName="glance-log" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.535457 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a74bceb-8035-40a6-9a1f-99e60c2f4108" containerName="mariadb-account-create-update" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.535549 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="e579c42e-87cb-4c76-9a44-9dcafb37f721" containerName="mariadb-database-create" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.536643 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.540624 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.542811 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.547606 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.553654 4943 scope.go:117] "RemoveContainer" containerID="ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129" Nov 29 07:51:40 crc kubenswrapper[4943]: E1129 07:51:40.555205 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129\": container with ID starting with ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129 not found: ID does not exist" containerID="ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.555309 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129"} err="failed to get container status \"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129\": rpc error: code = NotFound desc = could not find container \"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129\": container with ID starting with ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129 not found: ID does not exist" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.555424 4943 scope.go:117] "RemoveContainer" containerID="5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518" Nov 29 07:51:40 crc kubenswrapper[4943]: E1129 07:51:40.555794 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518\": container with ID starting with 5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518 not found: ID does not exist" containerID="5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.555893 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518"} err="failed to get container status \"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518\": rpc error: code = NotFound desc = could not find container \"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518\": container with ID starting with 5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518 not found: ID does not exist" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.555980 4943 scope.go:117] "RemoveContainer" containerID="ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.556269 4943 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.556399 
4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129"} err="failed to get container status \"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129\": rpc error: code = NotFound desc = could not find container \"ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129\": container with ID starting with ac0ddda636f17bd28e1a48139964bb036617b2590c73c04362682ebeaeedd129 not found: ID does not exist" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.556518 4943 scope.go:117] "RemoveContainer" containerID="5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.557049 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518"} err="failed to get container status \"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518\": rpc error: code = NotFound desc = could not find container \"5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518\": container with ID starting with 5c8fdd4ef31c94627a89cb7838ac862d81ad5686ce82cde4921955d32d702518 not found: ID does not exist" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.657637 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.657677 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-scripts\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.657719 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34631d9e-04d9-4560-9535-16ae6c60da19-logs\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.657745 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-config-data\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.658103 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/34631d9e-04d9-4560-9535-16ae6c60da19-ceph\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.658169 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod 
\"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.658217 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh9tl\" (UniqueName: \"kubernetes.io/projected/34631d9e-04d9-4560-9535-16ae6c60da19-kube-api-access-sh9tl\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.658277 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.658317 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/34631d9e-04d9-4560-9535-16ae6c60da19-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760124 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-scripts\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760226 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34631d9e-04d9-4560-9535-16ae6c60da19-logs\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760265 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-config-data\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760389 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/34631d9e-04d9-4560-9535-16ae6c60da19-ceph\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760423 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760454 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh9tl\" (UniqueName: \"kubernetes.io/projected/34631d9e-04d9-4560-9535-16ae6c60da19-kube-api-access-sh9tl\") pod \"glance-default-external-api-0\" (UID: 
\"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760490 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760516 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/34631d9e-04d9-4560-9535-16ae6c60da19-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.760549 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.762968 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.762987 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/34631d9e-04d9-4560-9535-16ae6c60da19-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.763208 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34631d9e-04d9-4560-9535-16ae6c60da19-logs\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.767055 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/34631d9e-04d9-4560-9535-16ae6c60da19-ceph\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.767094 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.767134 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-scripts\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc 
kubenswrapper[4943]: I1129 07:51:40.777289 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-config-data\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.778270 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/34631d9e-04d9-4560-9535-16ae6c60da19-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.780534 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh9tl\" (UniqueName: \"kubernetes.io/projected/34631d9e-04d9-4560-9535-16ae6c60da19-kube-api-access-sh9tl\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.815747 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"34631d9e-04d9-4560-9535-16ae6c60da19\") " pod="openstack/glance-default-external-api-0" Nov 29 07:51:40 crc kubenswrapper[4943]: I1129 07:51:40.856107 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 07:51:41 crc kubenswrapper[4943]: I1129 07:51:41.340218 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="675b86e9-13da-4901-ab2e-c733d069b820" path="/var/lib/kubelet/pods/675b86e9-13da-4901-ab2e-c733d069b820/volumes" Nov 29 07:51:43 crc kubenswrapper[4943]: I1129 07:51:43.324910 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Nov 29 07:51:43 crc kubenswrapper[4943]: I1129 07:51:43.338844 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.079270 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-gz9x8"] Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.081007 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.082968 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-bjj8w" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.083147 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.107686 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-gz9x8"] Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.135517 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-combined-ca-bundle\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.135664 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-job-config-data\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.135682 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-config-data\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.135794 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44frp\" (UniqueName: \"kubernetes.io/projected/62083f23-18f3-425f-bcae-74af545175f2-kube-api-access-44frp\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.238295 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44frp\" (UniqueName: \"kubernetes.io/projected/62083f23-18f3-425f-bcae-74af545175f2-kube-api-access-44frp\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.238434 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-combined-ca-bundle\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.238502 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-job-config-data\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.238567 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-config-data\") pod \"manila-db-sync-gz9x8\" (UID: 
\"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.248274 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-job-config-data\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.254257 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-combined-ca-bundle\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.256000 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-config-data\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.256766 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44frp\" (UniqueName: \"kubernetes.io/projected/62083f23-18f3-425f-bcae-74af545175f2-kube-api-access-44frp\") pod \"manila-db-sync-gz9x8\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") " pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:44 crc kubenswrapper[4943]: I1129 07:51:44.408730 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-gz9x8" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.252147 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.300514 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8z45c\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-kube-api-access-8z45c\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.300609 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-scripts\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.300734 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-logs\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.300793 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-httpd-run\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.300873 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-config-data\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.300898 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.300935 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-combined-ca-bundle\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.300951 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-internal-tls-certs\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.301011 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-ceph\") pod \"d83ad6bc-afc3-4764-afef-fc000084ed65\" (UID: \"d83ad6bc-afc3-4764-afef-fc000084ed65\") " Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.301596 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.302654 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-logs" (OuterVolumeSpecName: "logs") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.323256 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-kube-api-access-8z45c" (OuterVolumeSpecName: "kube-api-access-8z45c") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "kube-api-access-8z45c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.326714 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-scripts" (OuterVolumeSpecName: "scripts") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.327895 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:51:47 crc kubenswrapper[4943]: E1129 07:51:47.328237 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.328268 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.335117 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-ceph" (OuterVolumeSpecName: "ceph") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.338958 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.402944 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-logs\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.402979 4943 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d83ad6bc-afc3-4764-afef-fc000084ed65-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.403000 4943 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.403011 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.403024 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-ceph\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.403033 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8z45c\" (UniqueName: \"kubernetes.io/projected/d83ad6bc-afc3-4764-afef-fc000084ed65-kube-api-access-8z45c\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.403041 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.447174 4943 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.454753 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.469307 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-config-data" (OuterVolumeSpecName: "config-data") pod "d83ad6bc-afc3-4764-afef-fc000084ed65" (UID: "d83ad6bc-afc3-4764-afef-fc000084ed65"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.503960 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.504053 4943 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.504064 4943 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d83ad6bc-afc3-4764-afef-fc000084ed65-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.558914 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d83ad6bc-afc3-4764-afef-fc000084ed65","Type":"ContainerDied","Data":"2873e88902b957a54570e9d3c35a21a3730da1e363556a08519a3ab1e5031a89"}
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.559002 4943 scope.go:117] "RemoveContainer" containerID="af1e1d65eae05cc23309d23dfe8251ce53f00b02967f619aa450c62bedd31592"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.559421 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.582071 4943 scope.go:117] "RemoveContainer" containerID="3f95c10937bda25904fd2b7d906fc5d2be15ed62dcbc634ba1b2efc20250f734"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.608309 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.619279 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.629562 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:47 crc kubenswrapper[4943]: E1129 07:51:47.629965 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerName="glance-httpd"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.629984 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerName="glance-httpd"
Nov 29 07:51:47 crc kubenswrapper[4943]: E1129 07:51:47.630013 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerName="glance-log"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.630021 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerName="glance-log"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.630225 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerName="glance-log"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.630249 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" containerName="glance-httpd"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.631212 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.633305 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.633610 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.659693 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.764680 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 29 07:51:47 crc kubenswrapper[4943]: W1129 07:51:47.782606 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34631d9e_04d9_4560_9535_16ae6c60da19.slice/crio-af3b799d6a6a554e7bf67a66908c5305b1d6d2a449ef9cd3e0fb79e78df45ffc WatchSource:0}: Error finding container af3b799d6a6a554e7bf67a66908c5305b1d6d2a449ef9cd3e0fb79e78df45ffc: Status 404 returned error can't find the container with id af3b799d6a6a554e7bf67a66908c5305b1d6d2a449ef9cd3e0fb79e78df45ffc
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.808506 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.808726 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-ceph\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.808803 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.808888 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.809009 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.809094 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.809232 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b72kp\" (UniqueName: \"kubernetes.io/projected/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-kube-api-access-b72kp\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.809580 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-logs\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.809663 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.911682 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.911754 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-ceph\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.911797 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.911820 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.911872 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.911912 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.911965 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b72kp\" (UniqueName: \"kubernetes.io/projected/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-kube-api-access-b72kp\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.912114 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-logs\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.912157 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.919291 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.919711 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.920121 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-logs\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.920541 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.920556 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.924094 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-ceph\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.933092 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.933092 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.940821 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b72kp\" (UniqueName: \"kubernetes.io/projected/82f5ccb7-ce13-4e07-9852-be76cbb9dda6-kube-api-access-b72kp\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:47 crc kubenswrapper[4943]: I1129 07:51:47.972443 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"82f5ccb7-ce13-4e07-9852-be76cbb9dda6\") " pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.258430 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:48 crc kubenswrapper[4943]: W1129 07:51:48.484054 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62083f23_18f3_425f_bcae_74af545175f2.slice/crio-6b42ff502ea7307ebf6b751fa4b64c7669d279bb8238486e1d929446d7fbdac3 WatchSource:0}: Error finding container 6b42ff502ea7307ebf6b751fa4b64c7669d279bb8238486e1d929446d7fbdac3: Status 404 returned error can't find the container with id 6b42ff502ea7307ebf6b751fa4b64c7669d279bb8238486e1d929446d7fbdac3
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.487383 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-gz9x8"]
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.575441 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-gz9x8" event={"ID":"62083f23-18f3-425f-bcae-74af545175f2","Type":"ContainerStarted","Data":"6b42ff502ea7307ebf6b751fa4b64c7669d279bb8238486e1d929446d7fbdac3"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.578957 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bfc5c9977-ggjqb" event={"ID":"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0","Type":"ContainerStarted","Data":"f302715b8d86ae30e1622c562fd6fdd51d0bb55c634282f60277b11de2219a75"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.579026 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bfc5c9977-ggjqb" event={"ID":"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0","Type":"ContainerStarted","Data":"c540d975e2a3e06617f39b2826d11f127dabea884857f3d8ffeada387f983a29"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.579160 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7bfc5c9977-ggjqb" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerName="horizon-log" containerID="cri-o://c540d975e2a3e06617f39b2826d11f127dabea884857f3d8ffeada387f983a29" gracePeriod=30
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.579253 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7bfc5c9977-ggjqb" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerName="horizon" containerID="cri-o://f302715b8d86ae30e1622c562fd6fdd51d0bb55c634282f60277b11de2219a75" gracePeriod=30
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.580854 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b8cb6d54f-fqmgs" event={"ID":"20f0eeca-d432-4c9a-9e82-6cd6527e8d65","Type":"ContainerStarted","Data":"9885765388ac8e11b4aad70a64c50902a95703ee2b90eba6d6a0f4ba710bafde"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.584611 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"34631d9e-04d9-4560-9535-16ae6c60da19","Type":"ContainerStarted","Data":"2f3c2352ad3ad01b36e67436cd59f48ec291585403b85ffe4d529ab485dde84c"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.584657 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"34631d9e-04d9-4560-9535-16ae6c60da19","Type":"ContainerStarted","Data":"af3b799d6a6a554e7bf67a66908c5305b1d6d2a449ef9cd3e0fb79e78df45ffc"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.590159 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5897fd8cd4-cfjb4" event={"ID":"2401492d-b549-43d0-988e-4d1235af15cf","Type":"ContainerStarted","Data":"f57733cbd771d9b14f6636e01a0b1b3660f074cc9c1d32d021e83c7a572f150a"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.590215 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5897fd8cd4-cfjb4" event={"ID":"2401492d-b549-43d0-988e-4d1235af15cf","Type":"ContainerStarted","Data":"87dc35cd40cf69972273e35c7178db0754791d0c9903ace3147529c5d7e1b7a2"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.597807 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68cf6db5b8-5ckcr" event={"ID":"d7af4d3f-647b-437d-8ab3-d8bb4debb25a","Type":"ContainerStarted","Data":"9e290540792a3b2c59b37ab9513327c41b4b059d04975f6f44ee7e0cd39c8f4a"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.597860 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68cf6db5b8-5ckcr" event={"ID":"d7af4d3f-647b-437d-8ab3-d8bb4debb25a","Type":"ContainerStarted","Data":"7ad4b3594960dd0587bb502cb4d62e259c8b560753f6130684f8c69c8ae4d38b"}
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.604474 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7bfc5c9977-ggjqb" podStartSLOduration=3.068770442 podStartE2EDuration="15.604454531s" podCreationTimestamp="2025-11-29 07:51:33 +0000 UTC" firstStartedPulling="2025-11-29 07:51:35.316008369 +0000 UTC m=+4670.246097122" lastFinishedPulling="2025-11-29 07:51:47.851692428 +0000 UTC m=+4682.781781211" observedRunningTime="2025-11-29 07:51:48.597917602 +0000 UTC m=+4683.528006355" watchObservedRunningTime="2025-11-29 07:51:48.604454531 +0000 UTC m=+4683.534543284"
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.643989 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5897fd8cd4-cfjb4" podStartSLOduration=2.417759332 podStartE2EDuration="12.643969844s" podCreationTimestamp="2025-11-29 07:51:36 +0000 UTC" firstStartedPulling="2025-11-29 07:51:37.208817503 +0000 UTC m=+4672.138906256" lastFinishedPulling="2025-11-29 07:51:47.435028025 +0000 UTC m=+4682.365116768" observedRunningTime="2025-11-29 07:51:48.62205127 +0000 UTC m=+4683.552140043" watchObservedRunningTime="2025-11-29 07:51:48.643969844 +0000 UTC m=+4683.574058597"
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.648783 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-68cf6db5b8-5ckcr" podStartSLOduration=2.707193096 podStartE2EDuration="12.648767771s" podCreationTimestamp="2025-11-29 07:51:36 +0000 UTC" firstStartedPulling="2025-11-29 07:51:37.448867323 +0000 UTC m=+4672.378956076" lastFinishedPulling="2025-11-29 07:51:47.390441998 +0000 UTC m=+4682.320530751" observedRunningTime="2025-11-29 07:51:48.639616107 +0000 UTC m=+4683.569704870" watchObservedRunningTime="2025-11-29 07:51:48.648767771 +0000 UTC m=+4683.578856524"
Nov 29 07:51:48 crc kubenswrapper[4943]: I1129 07:51:48.815687 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.341345 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d83ad6bc-afc3-4764-afef-fc000084ed65" path="/var/lib/kubelet/pods/d83ad6bc-afc3-4764-afef-fc000084ed65/volumes"
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.628522 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"34631d9e-04d9-4560-9535-16ae6c60da19","Type":"ContainerStarted","Data":"6334051f471248b423ae7068625bc3c6430e2fb3016b53dabd7f017f2a74a9bb"}
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.641871 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"82f5ccb7-ce13-4e07-9852-be76cbb9dda6","Type":"ContainerStarted","Data":"bce0e68ec13eeee1ea6cbfabac5c6f96dc4b3bcfb537ae9d0cdab770431dede7"}
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.641922 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"82f5ccb7-ce13-4e07-9852-be76cbb9dda6","Type":"ContainerStarted","Data":"fc4c2a11cd70c9aba535c8d66267522b1adbf295eba43a207e1b6564af31bfb6"}
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.647760 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b8cb6d54f-fqmgs" event={"ID":"20f0eeca-d432-4c9a-9e82-6cd6527e8d65","Type":"ContainerStarted","Data":"29061c6fe75550b28c46c503b1a3d44230251766c3da812d44aa087bb7e7b9b3"}
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.648849 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7b8cb6d54f-fqmgs" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerName="horizon-log" containerID="cri-o://9885765388ac8e11b4aad70a64c50902a95703ee2b90eba6d6a0f4ba710bafde" gracePeriod=30
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.649202 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7b8cb6d54f-fqmgs" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerName="horizon" containerID="cri-o://29061c6fe75550b28c46c503b1a3d44230251766c3da812d44aa087bb7e7b9b3" gracePeriod=30
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.650542 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.650531462 podStartE2EDuration="9.650531462s" podCreationTimestamp="2025-11-29 07:51:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:51:49.649711172 +0000 UTC m=+4684.579799945" watchObservedRunningTime="2025-11-29 07:51:49.650531462 +0000 UTC m=+4684.580620215"
Nov 29 07:51:49 crc kubenswrapper[4943]: I1129 07:51:49.684244 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7b8cb6d54f-fqmgs" podStartSLOduration=4.007734823 podStartE2EDuration="16.684172412s" podCreationTimestamp="2025-11-29 07:51:33 +0000 UTC" firstStartedPulling="2025-11-29 07:51:35.306005575 +0000 UTC m=+4670.236094328" lastFinishedPulling="2025-11-29 07:51:47.982443164 +0000 UTC m=+4682.912531917" observedRunningTime="2025-11-29 07:51:49.674897236 +0000 UTC m=+4684.604985989" watchObservedRunningTime="2025-11-29 07:51:49.684172412 +0000 UTC m=+4684.614261165"
Nov 29 07:51:50 crc kubenswrapper[4943]: I1129 07:51:50.663544 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"82f5ccb7-ce13-4e07-9852-be76cbb9dda6","Type":"ContainerStarted","Data":"0674d5809d6593b5ccb363ed52dfa4c88dcc5a40949c87659b9fe77b7b3f5bae"}
Nov 29 07:51:50 crc kubenswrapper[4943]: I1129 07:51:50.718220 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.718197659 podStartE2EDuration="3.718197659s" podCreationTimestamp="2025-11-29 07:51:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:51:50.691211702 +0000 UTC m=+4685.621300535" watchObservedRunningTime="2025-11-29 07:51:50.718197659 +0000 UTC m=+4685.648286432"
Nov 29 07:51:50 crc kubenswrapper[4943]: I1129 07:51:50.857322 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 29 07:51:50 crc kubenswrapper[4943]: I1129 07:51:50.857390 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 29 07:51:50 crc kubenswrapper[4943]: I1129 07:51:50.894976 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 29 07:51:50 crc kubenswrapper[4943]: I1129 07:51:50.916143 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 29 07:51:51 crc kubenswrapper[4943]: I1129 07:51:51.674274 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 29 07:51:51 crc kubenswrapper[4943]: I1129 07:51:51.674676 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 29 07:51:54 crc kubenswrapper[4943]: I1129 07:51:54.069769 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7bfc5c9977-ggjqb"
Nov 29 07:51:54 crc kubenswrapper[4943]: I1129 07:51:54.124799 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7b8cb6d54f-fqmgs"
Nov 29 07:51:56 crc kubenswrapper[4943]: I1129 07:51:56.549474 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5897fd8cd4-cfjb4"
Nov 29 07:51:56 crc kubenswrapper[4943]: I1129 07:51:56.550510 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5897fd8cd4-cfjb4"
Nov 29 07:51:56 crc kubenswrapper[4943]: I1129 07:51:56.728185 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68cf6db5b8-5ckcr"
Nov 29 07:51:56 crc kubenswrapper[4943]: I1129 07:51:56.728273 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68cf6db5b8-5ckcr"
Nov 29 07:51:56 crc kubenswrapper[4943]: I1129 07:51:56.736894 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-gz9x8" event={"ID":"62083f23-18f3-425f-bcae-74af545175f2","Type":"ContainerStarted","Data":"4a58ef8fc7275beff7a920905e2f97780c3ba1a377778c93111b8828abdc6466"}
Nov 29 07:51:56 crc kubenswrapper[4943]: I1129 07:51:56.773787 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-gz9x8" podStartSLOduration=6.322029695 podStartE2EDuration="12.773771331s" podCreationTimestamp="2025-11-29 07:51:44 +0000 UTC" firstStartedPulling="2025-11-29 07:51:48.48785898 +0000 UTC m=+4683.417947733" lastFinishedPulling="2025-11-29 07:51:54.939600576 +0000 UTC m=+4689.869689369" observedRunningTime="2025-11-29 07:51:56.771710871 +0000 UTC m=+4691.701799664" watchObservedRunningTime="2025-11-29 07:51:56.773771331 +0000 UTC m=+4691.703860074"
Nov 29 07:51:58 crc kubenswrapper[4943]: I1129 07:51:58.258735 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:58 crc kubenswrapper[4943]: I1129 07:51:58.259156 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:58 crc kubenswrapper[4943]: I1129 07:51:58.304140 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:58 crc kubenswrapper[4943]: I1129 07:51:58.328178 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4"
Nov 29 07:51:58 crc kubenswrapper[4943]: E1129 07:51:58.328480 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:51:58 crc kubenswrapper[4943]: I1129 07:51:58.417891 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:58 crc kubenswrapper[4943]: I1129 07:51:58.752427 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 29 07:51:58 crc kubenswrapper[4943]: I1129 07:51:58.752488 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 29 07:52:01 crc kubenswrapper[4943]: I1129 07:52:01.425701 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 29 07:52:01 crc kubenswrapper[4943]: I1129 07:52:01.426119 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 29 07:52:06 crc kubenswrapper[4943]: I1129 07:52:06.550635 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5897fd8cd4-cfjb4" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.0:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.0:8443: connect: connection refused"
Nov 29 07:52:06 crc kubenswrapper[4943]: I1129 07:52:06.730255 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68cf6db5b8-5ckcr" podUID="d7af4d3f-647b-437d-8ab3-d8bb4debb25a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.1:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.1:8443: connect: connection refused"
Nov 29 07:52:09 crc kubenswrapper[4943]: I1129 07:52:09.866438 4943 generic.go:334] "Generic (PLEG): container finished" podID="62083f23-18f3-425f-bcae-74af545175f2" containerID="4a58ef8fc7275beff7a920905e2f97780c3ba1a377778c93111b8828abdc6466" exitCode=0
Nov 29 07:52:09 crc kubenswrapper[4943]: I1129 07:52:09.866600 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-gz9x8" event={"ID":"62083f23-18f3-425f-bcae-74af545175f2","Type":"ContainerDied","Data":"4a58ef8fc7275beff7a920905e2f97780c3ba1a377778c93111b8828abdc6466"}
Nov 29 07:52:10 crc kubenswrapper[4943]: I1129 07:52:10.328126 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4"
Nov 29 07:52:10 crc kubenswrapper[4943]: E1129 07:52:10.328892 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.441279 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-gz9x8"
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.570835 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-job-config-data\") pod \"62083f23-18f3-425f-bcae-74af545175f2\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") "
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.571087 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44frp\" (UniqueName: \"kubernetes.io/projected/62083f23-18f3-425f-bcae-74af545175f2-kube-api-access-44frp\") pod \"62083f23-18f3-425f-bcae-74af545175f2\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") "
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.571152 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-combined-ca-bundle\") pod \"62083f23-18f3-425f-bcae-74af545175f2\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") "
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.571259 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-config-data\") pod \"62083f23-18f3-425f-bcae-74af545175f2\" (UID: \"62083f23-18f3-425f-bcae-74af545175f2\") "
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.582029 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "62083f23-18f3-425f-bcae-74af545175f2" (UID: "62083f23-18f3-425f-bcae-74af545175f2"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.582927 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62083f23-18f3-425f-bcae-74af545175f2-kube-api-access-44frp" (OuterVolumeSpecName: "kube-api-access-44frp") pod "62083f23-18f3-425f-bcae-74af545175f2" (UID: "62083f23-18f3-425f-bcae-74af545175f2"). InnerVolumeSpecName "kube-api-access-44frp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.585618 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-config-data" (OuterVolumeSpecName: "config-data") pod "62083f23-18f3-425f-bcae-74af545175f2" (UID: "62083f23-18f3-425f-bcae-74af545175f2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.630710 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62083f23-18f3-425f-bcae-74af545175f2" (UID: "62083f23-18f3-425f-bcae-74af545175f2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.675665 4943 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-job-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.675707 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44frp\" (UniqueName: \"kubernetes.io/projected/62083f23-18f3-425f-bcae-74af545175f2-kube-api-access-44frp\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.675722 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.675739 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62083f23-18f3-425f-bcae-74af545175f2-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.892300 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-gz9x8" event={"ID":"62083f23-18f3-425f-bcae-74af545175f2","Type":"ContainerDied","Data":"6b42ff502ea7307ebf6b751fa4b64c7669d279bb8238486e1d929446d7fbdac3"}
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.892345 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b42ff502ea7307ebf6b751fa4b64c7669d279bb8238486e1d929446d7fbdac3"
Nov 29 07:52:11 crc kubenswrapper[4943]: I1129 07:52:11.892411 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-gz9x8"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.227799 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"]
Nov 29 07:52:12 crc kubenswrapper[4943]: E1129 07:52:12.228637 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62083f23-18f3-425f-bcae-74af545175f2" containerName="manila-db-sync"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.228657 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="62083f23-18f3-425f-bcae-74af545175f2" containerName="manila-db-sync"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.228887 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="62083f23-18f3-425f-bcae-74af545175f2" containerName="manila-db-sync"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.230106 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.235269 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.235298 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-bjj8w"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.235493 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.239059 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.240246 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"]
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.241817 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.251792 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.261962 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"]
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.294252 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"]
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.364211 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-mtv4l"]
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.365775 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.373886 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-mtv4l"]
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.394818 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.394855 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gft7\" (UniqueName: \"kubernetes.io/projected/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-kube-api-access-7gft7\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.394880 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.394906 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-scripts\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.394927 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.394947 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xjwr\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-kube-api-access-4xjwr\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.394967 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.394988 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.395014 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.395043 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-scripts\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.395070 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.395084 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-ceph\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.395100 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.395122 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497281 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497320 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-ceph\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497350 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7p6x\" (UniqueName: \"kubernetes.io/projected/b383eee2-b374-463b-8cdc-429b84772fcf-kube-api-access-v7p6x\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497380 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497435 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497463 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497538 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-config\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497551 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497557 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497604 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497744 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497739 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gft7\" (UniqueName: \"kubernetes.io/projected/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-kube-api-access-7gft7\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497788 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497805 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497899 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-scripts\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497937 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.497980 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.498030 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xjwr\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-kube-api-access-4xjwr\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.498071 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.498724 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.498790 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.498889 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-scripts\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.498924 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.595116 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"]
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.596734 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.601011 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.602327 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-config\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.602399 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.602430 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.602527 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.602577 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7p6x\" (UniqueName: \"kubernetes.io/projected/b383eee2-b374-463b-8cdc-429b84772fcf-kube-api-access-v7p6x\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.602611 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.603289 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-config\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.603366 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.603996 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.604365 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.607196 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b383eee2-b374-463b-8cdc-429b84772fcf-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.617962 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.704648 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b49838-0d56-4d8c-9fc5-d06629831630-logs\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.704730 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/66b49838-0d56-4d8c-9fc5-d06629831630-etc-machine-id\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.704785 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-scripts\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.704821 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data-custom\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.704848 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.704877 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.705170 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84skb\" (UniqueName: \"kubernetes.io/projected/66b49838-0d56-4d8c-9fc5-d06629831630-kube-api-access-84skb\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.733639 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-scripts\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.733904 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.734186 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.735332 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.735465 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-ceph\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.735611 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.735873 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-scripts\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.735879 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.736267 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gft7\" (UniqueName: \"kubernetes.io/projected/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-kube-api-access-7gft7\") pod \"manila-scheduler-0\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " pod="openstack/manila-scheduler-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.736322 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.736726 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xjwr\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-kube-api-access-4xjwr\") pod \"manila-share-share1-0\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " pod="openstack/manila-share-share1-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.737457 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7p6x\" (UniqueName: \"kubernetes.io/projected/b383eee2-b374-463b-8cdc-429b84772fcf-kube-api-access-v7p6x\") pod \"dnsmasq-dns-76b5fdb995-mtv4l\" (UID: \"b383eee2-b374-463b-8cdc-429b84772fcf\") " pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.807315 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84skb\" (UniqueName: \"kubernetes.io/projected/66b49838-0d56-4d8c-9fc5-d06629831630-kube-api-access-84skb\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.807717 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b49838-0d56-4d8c-9fc5-d06629831630-logs\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.807748 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/66b49838-0d56-4d8c-9fc5-d06629831630-etc-machine-id\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.807799 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-scripts\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.807828 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data-custom\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0"
Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.807850 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") "
pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.807875 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.807968 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/66b49838-0d56-4d8c-9fc5-d06629831630-etc-machine-id\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.808400 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b49838-0d56-4d8c-9fc5-d06629831630-logs\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.813213 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data-custom\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.813743 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.814040 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.814490 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-scripts\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.832674 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84skb\" (UniqueName: \"kubernetes.io/projected/66b49838-0d56-4d8c-9fc5-d06629831630-kube-api-access-84skb\") pod \"manila-api-0\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.860800 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.903050 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.918731 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 29 07:52:12 crc kubenswrapper[4943]: I1129 07:52:12.991790 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l" Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.575895 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.605197 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.766154 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.766986 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.773843 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.814852 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-mtv4l"] Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.970112 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"66b49838-0d56-4d8c-9fc5-d06629831630","Type":"ContainerStarted","Data":"af60e672d6a65ec7647941c1f8ceb3e87c0562ac6e3243b25e88e5219e6fd089"} Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.971863 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l" event={"ID":"b383eee2-b374-463b-8cdc-429b84772fcf","Type":"ContainerStarted","Data":"8945f9f44a8adf8cb6fe243dda8e7d0229e98860b877532d5ae25f1b1100c4f1"} Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.973203 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"b5dbf0d5-a834-4366-8d63-6d2672446171","Type":"ContainerStarted","Data":"86317e3c0c2e45cb05b9ce44c7cbfff5232efb8bd9046a0a29a051a6a04e6821"} Nov 29 07:52:13 crc kubenswrapper[4943]: I1129 07:52:13.975036 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57","Type":"ContainerStarted","Data":"0c05507f35a55c52c201548b776f611a89ef8feb2c14a18bd6b082eede56184d"} Nov 29 07:52:14 crc kubenswrapper[4943]: I1129 07:52:14.996914 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"66b49838-0d56-4d8c-9fc5-d06629831630","Type":"ContainerStarted","Data":"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171"} Nov 29 07:52:14 crc kubenswrapper[4943]: I1129 07:52:14.997441 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"66b49838-0d56-4d8c-9fc5-d06629831630","Type":"ContainerStarted","Data":"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9"} Nov 29 07:52:14 crc kubenswrapper[4943]: I1129 07:52:14.998911 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 29 07:52:15 crc kubenswrapper[4943]: I1129 07:52:15.009257 4943 generic.go:334] "Generic (PLEG): container finished" podID="b383eee2-b374-463b-8cdc-429b84772fcf" containerID="5a71f6b8a82773a4f63373b0463e7c777176fe31e16e2c7d465e94964fac7f86" exitCode=0 Nov 29 07:52:15 crc kubenswrapper[4943]: I1129 07:52:15.009349 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l" 
event={"ID":"b383eee2-b374-463b-8cdc-429b84772fcf","Type":"ContainerDied","Data":"5a71f6b8a82773a4f63373b0463e7c777176fe31e16e2c7d465e94964fac7f86"} Nov 29 07:52:15 crc kubenswrapper[4943]: I1129 07:52:15.023746 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57","Type":"ContainerStarted","Data":"501976e6a8d58428e202b991df1b1501a92c9b75aee73624a132e4fa89d19d8b"} Nov 29 07:52:15 crc kubenswrapper[4943]: I1129 07:52:15.039032 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=3.039011018 podStartE2EDuration="3.039011018s" podCreationTimestamp="2025-11-29 07:52:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:52:15.024068735 +0000 UTC m=+4709.954157478" watchObservedRunningTime="2025-11-29 07:52:15.039011018 +0000 UTC m=+4709.969099771" Nov 29 07:52:15 crc kubenswrapper[4943]: I1129 07:52:15.689764 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 29 07:52:16 crc kubenswrapper[4943]: I1129 07:52:16.036427 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l" event={"ID":"b383eee2-b374-463b-8cdc-429b84772fcf","Type":"ContainerStarted","Data":"b069996f81fa5244b4edcb20a259dbacbbca9ad28e71f1dfbde0c0423c35d2e7"} Nov 29 07:52:16 crc kubenswrapper[4943]: I1129 07:52:16.036591 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l" Nov 29 07:52:16 crc kubenswrapper[4943]: I1129 07:52:16.047453 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57","Type":"ContainerStarted","Data":"93789fd41c1636f91afe1d181bd5590dcf8d91b83219bab82782f916a92c687d"} Nov 29 07:52:16 crc kubenswrapper[4943]: I1129 07:52:16.057476 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l" podStartSLOduration=4.057456946 podStartE2EDuration="4.057456946s" podCreationTimestamp="2025-11-29 07:52:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:52:16.055920549 +0000 UTC m=+4710.986009302" watchObservedRunningTime="2025-11-29 07:52:16.057456946 +0000 UTC m=+4710.987545709" Nov 29 07:52:16 crc kubenswrapper[4943]: I1129 07:52:16.075915 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.463284786 podStartE2EDuration="4.075899085s" podCreationTimestamp="2025-11-29 07:52:12 +0000 UTC" firstStartedPulling="2025-11-29 07:52:13.743839097 +0000 UTC m=+4708.673927850" lastFinishedPulling="2025-11-29 07:52:14.356453406 +0000 UTC m=+4709.286542149" observedRunningTime="2025-11-29 07:52:16.073384744 +0000 UTC m=+4711.003473497" watchObservedRunningTime="2025-11-29 07:52:16.075899085 +0000 UTC m=+4711.005987838" Nov 29 07:52:17 crc kubenswrapper[4943]: I1129 07:52:17.057378 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" containerName="manila-api-log" containerID="cri-o://e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9" gracePeriod=30 Nov 29 07:52:17 crc kubenswrapper[4943]: I1129 07:52:17.057441 4943 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" containerName="manila-api" containerID="cri-o://e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171" gracePeriod=30 Nov 29 07:52:17 crc kubenswrapper[4943]: I1129 07:52:17.935949 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.061174 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b49838-0d56-4d8c-9fc5-d06629831630-logs\") pod \"66b49838-0d56-4d8c-9fc5-d06629831630\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.061218 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-scripts\") pod \"66b49838-0d56-4d8c-9fc5-d06629831630\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.061284 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/66b49838-0d56-4d8c-9fc5-d06629831630-etc-machine-id\") pod \"66b49838-0d56-4d8c-9fc5-d06629831630\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.061309 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data\") pod \"66b49838-0d56-4d8c-9fc5-d06629831630\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.061388 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84skb\" (UniqueName: \"kubernetes.io/projected/66b49838-0d56-4d8c-9fc5-d06629831630-kube-api-access-84skb\") pod \"66b49838-0d56-4d8c-9fc5-d06629831630\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.061436 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data-custom\") pod \"66b49838-0d56-4d8c-9fc5-d06629831630\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.061549 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-combined-ca-bundle\") pod \"66b49838-0d56-4d8c-9fc5-d06629831630\" (UID: \"66b49838-0d56-4d8c-9fc5-d06629831630\") " Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.061798 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66b49838-0d56-4d8c-9fc5-d06629831630-logs" (OuterVolumeSpecName: "logs") pod "66b49838-0d56-4d8c-9fc5-d06629831630" (UID: "66b49838-0d56-4d8c-9fc5-d06629831630"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.062010 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66b49838-0d56-4d8c-9fc5-d06629831630-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.062548 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/66b49838-0d56-4d8c-9fc5-d06629831630-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "66b49838-0d56-4d8c-9fc5-d06629831630" (UID: "66b49838-0d56-4d8c-9fc5-d06629831630"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.069182 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66b49838-0d56-4d8c-9fc5-d06629831630-kube-api-access-84skb" (OuterVolumeSpecName: "kube-api-access-84skb") pod "66b49838-0d56-4d8c-9fc5-d06629831630" (UID: "66b49838-0d56-4d8c-9fc5-d06629831630"). InnerVolumeSpecName "kube-api-access-84skb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.070360 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "66b49838-0d56-4d8c-9fc5-d06629831630" (UID: "66b49838-0d56-4d8c-9fc5-d06629831630"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.090727 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-scripts" (OuterVolumeSpecName: "scripts") pod "66b49838-0d56-4d8c-9fc5-d06629831630" (UID: "66b49838-0d56-4d8c-9fc5-d06629831630"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.093944 4943 generic.go:334] "Generic (PLEG): container finished" podID="66b49838-0d56-4d8c-9fc5-d06629831630" containerID="e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171" exitCode=0 Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.094180 4943 generic.go:334] "Generic (PLEG): container finished" podID="66b49838-0d56-4d8c-9fc5-d06629831630" containerID="e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9" exitCode=143 Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.094072 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"66b49838-0d56-4d8c-9fc5-d06629831630","Type":"ContainerDied","Data":"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171"} Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.094439 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"66b49838-0d56-4d8c-9fc5-d06629831630","Type":"ContainerDied","Data":"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9"} Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.094505 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"66b49838-0d56-4d8c-9fc5-d06629831630","Type":"ContainerDied","Data":"af60e672d6a65ec7647941c1f8ceb3e87c0562ac6e3243b25e88e5219e6fd089"} Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.094585 4943 scope.go:117] "RemoveContainer" containerID="e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.094023 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.132856 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66b49838-0d56-4d8c-9fc5-d06629831630" (UID: "66b49838-0d56-4d8c-9fc5-d06629831630"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.143767 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data" (OuterVolumeSpecName: "config-data") pod "66b49838-0d56-4d8c-9fc5-d06629831630" (UID: "66b49838-0d56-4d8c-9fc5-d06629831630"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.148214 4943 scope.go:117] "RemoveContainer" containerID="e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.163774 4943 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.163934 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.164004 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.164063 4943 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/66b49838-0d56-4d8c-9fc5-d06629831630-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.164113 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66b49838-0d56-4d8c-9fc5-d06629831630-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.164169 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84skb\" (UniqueName: \"kubernetes.io/projected/66b49838-0d56-4d8c-9fc5-d06629831630-kube-api-access-84skb\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.167382 4943 scope.go:117] "RemoveContainer" containerID="e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171" Nov 29 07:52:18 crc kubenswrapper[4943]: E1129 07:52:18.168008 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171\": container with ID starting with e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171 not found: ID does not exist" containerID="e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.168096 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171"} err="failed to get container status \"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171\": rpc error: code = NotFound desc = could not find container \"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171\": container with ID starting with e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171 not found: ID does not exist" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.168167 4943 scope.go:117] "RemoveContainer" containerID="e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9" Nov 29 07:52:18 crc kubenswrapper[4943]: E1129 07:52:18.168763 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9\": container with ID starting with 
e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9 not found: ID does not exist" containerID="e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.168834 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9"} err="failed to get container status \"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9\": rpc error: code = NotFound desc = could not find container \"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9\": container with ID starting with e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9 not found: ID does not exist" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.168906 4943 scope.go:117] "RemoveContainer" containerID="e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.173145 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171"} err="failed to get container status \"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171\": rpc error: code = NotFound desc = could not find container \"e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171\": container with ID starting with e9e69e02476d7a813ff106d9c57fa5aebb9ee4d77af0f4950d10c9c380bb9171 not found: ID does not exist" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.173409 4943 scope.go:117] "RemoveContainer" containerID="e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.173719 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9"} err="failed to get container status \"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9\": rpc error: code = NotFound desc = could not find container \"e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9\": container with ID starting with e9ad263afd0f835221b318b94116ef54f2ce686094b2813a6659bf35618fbdb9 not found: ID does not exist" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.429295 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.441211 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-api-0"] Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.460176 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 29 07:52:18 crc kubenswrapper[4943]: E1129 07:52:18.460663 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" containerName="manila-api" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.460684 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" containerName="manila-api" Nov 29 07:52:18 crc kubenswrapper[4943]: E1129 07:52:18.460702 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" containerName="manila-api-log" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.460709 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" containerName="manila-api-log" Nov 29 07:52:18 crc 
kubenswrapper[4943]: I1129 07:52:18.460920 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" containerName="manila-api" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.460934 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" containerName="manila-api-log" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.461933 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.465326 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.465686 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-internal-svc" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.469595 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.472062 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-public-svc" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571001 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-config-data-custom\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571039 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-scripts\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571076 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/128c6e68-82d5-4b0b-9734-3f86bd29385e-etc-machine-id\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571154 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zblqs\" (UniqueName: \"kubernetes.io/projected/128c6e68-82d5-4b0b-9734-3f86bd29385e-kube-api-access-zblqs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571185 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-public-tls-certs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571214 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/128c6e68-82d5-4b0b-9734-3f86bd29385e-logs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571234 4943 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571249 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-internal-tls-certs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.571284 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-config-data\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.672667 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-config-data\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.672739 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-scripts\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.672764 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-config-data-custom\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.672795 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/128c6e68-82d5-4b0b-9734-3f86bd29385e-etc-machine-id\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.672861 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zblqs\" (UniqueName: \"kubernetes.io/projected/128c6e68-82d5-4b0b-9734-3f86bd29385e-kube-api-access-zblqs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.672894 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-public-tls-certs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.672920 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/128c6e68-82d5-4b0b-9734-3f86bd29385e-logs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 
07:52:18.672989 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/128c6e68-82d5-4b0b-9734-3f86bd29385e-etc-machine-id\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.673081 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.673556 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/128c6e68-82d5-4b0b-9734-3f86bd29385e-logs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.673707 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-internal-tls-certs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.678728 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-internal-tls-certs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.679358 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-config-data\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.680520 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-public-tls-certs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.682205 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-config-data-custom\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.683228 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.690447 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zblqs\" (UniqueName: \"kubernetes.io/projected/128c6e68-82d5-4b0b-9734-3f86bd29385e-kube-api-access-zblqs\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.697051 4943 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/128c6e68-82d5-4b0b-9734-3f86bd29385e-scripts\") pod \"manila-api-0\" (UID: \"128c6e68-82d5-4b0b-9734-3f86bd29385e\") " pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.777237 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.923634 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.924016 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="ceilometer-central-agent" containerID="cri-o://b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6" gracePeriod=30 Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.924062 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="proxy-httpd" containerID="cri-o://d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16" gracePeriod=30 Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.924094 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="ceilometer-notification-agent" containerID="cri-o://cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7" gracePeriod=30 Nov 29 07:52:18 crc kubenswrapper[4943]: I1129 07:52:18.924080 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="sg-core" containerID="cri-o://4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803" gracePeriod=30 Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.092889 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.109229 4943 generic.go:334] "Generic (PLEG): container finished" podID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerID="d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16" exitCode=0 Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.109266 4943 generic.go:334] "Generic (PLEG): container finished" podID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerID="4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803" exitCode=2 Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.109316 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerDied","Data":"d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16"} Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.109340 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerDied","Data":"4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803"} Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.116697 4943 generic.go:334] "Generic (PLEG): container finished" podID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerID="f302715b8d86ae30e1622c562fd6fdd51d0bb55c634282f60277b11de2219a75" exitCode=137 Nov 29 07:52:19 crc 
kubenswrapper[4943]: I1129 07:52:19.116720 4943 generic.go:334] "Generic (PLEG): container finished" podID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerID="c540d975e2a3e06617f39b2826d11f127dabea884857f3d8ffeada387f983a29" exitCode=137 Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.116735 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bfc5c9977-ggjqb" event={"ID":"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0","Type":"ContainerDied","Data":"f302715b8d86ae30e1622c562fd6fdd51d0bb55c634282f60277b11de2219a75"} Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.116754 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bfc5c9977-ggjqb" event={"ID":"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0","Type":"ContainerDied","Data":"c540d975e2a3e06617f39b2826d11f127dabea884857f3d8ffeada387f983a29"} Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.131656 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:52:19 crc kubenswrapper[4943]: I1129 07:52:19.360404 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66b49838-0d56-4d8c-9fc5-d06629831630" path="/var/lib/kubelet/pods/66b49838-0d56-4d8c-9fc5-d06629831630/volumes" Nov 29 07:52:20 crc kubenswrapper[4943]: I1129 07:52:20.128881 4943 generic.go:334] "Generic (PLEG): container finished" podID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerID="b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6" exitCode=0 Nov 29 07:52:20 crc kubenswrapper[4943]: I1129 07:52:20.128960 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerDied","Data":"b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6"} Nov 29 07:52:20 crc kubenswrapper[4943]: I1129 07:52:20.131259 4943 generic.go:334] "Generic (PLEG): container finished" podID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerID="29061c6fe75550b28c46c503b1a3d44230251766c3da812d44aa087bb7e7b9b3" exitCode=137 Nov 29 07:52:20 crc kubenswrapper[4943]: I1129 07:52:20.131286 4943 generic.go:334] "Generic (PLEG): container finished" podID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerID="9885765388ac8e11b4aad70a64c50902a95703ee2b90eba6d6a0f4ba710bafde" exitCode=137 Nov 29 07:52:20 crc kubenswrapper[4943]: I1129 07:52:20.131308 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b8cb6d54f-fqmgs" event={"ID":"20f0eeca-d432-4c9a-9e82-6cd6527e8d65","Type":"ContainerDied","Data":"29061c6fe75550b28c46c503b1a3d44230251766c3da812d44aa087bb7e7b9b3"} Nov 29 07:52:20 crc kubenswrapper[4943]: I1129 07:52:20.131336 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b8cb6d54f-fqmgs" event={"ID":"20f0eeca-d432-4c9a-9e82-6cd6527e8d65","Type":"ContainerDied","Data":"9885765388ac8e11b4aad70a64c50902a95703ee2b90eba6d6a0f4ba710bafde"} Nov 29 07:52:20 crc kubenswrapper[4943]: I1129 07:52:20.878628 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:52:21 crc kubenswrapper[4943]: I1129 07:52:21.147965 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-68cf6db5b8-5ckcr" Nov 29 07:52:21 crc kubenswrapper[4943]: I1129 07:52:21.246513 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5897fd8cd4-cfjb4"] Nov 29 07:52:21 crc kubenswrapper[4943]: I1129 07:52:21.246741 4943 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5897fd8cd4-cfjb4" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon-log" containerID="cri-o://87dc35cd40cf69972273e35c7178db0754791d0c9903ace3147529c5d7e1b7a2" gracePeriod=30
Nov 29 07:52:21 crc kubenswrapper[4943]: I1129 07:52:21.247189 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5897fd8cd4-cfjb4" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon" containerID="cri-o://f57733cbd771d9b14f6636e01a0b1b3660f074cc9c1d32d021e83c7a572f150a" gracePeriod=30
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.494744 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bfc5c9977-ggjqb"
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.557198 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqgf6\" (UniqueName: \"kubernetes.io/projected/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-kube-api-access-lqgf6\") pod \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.557270 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-config-data\") pod \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.557392 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-horizon-secret-key\") pod \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.557513 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-logs\") pod \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.557620 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-scripts\") pod \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\" (UID: \"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.559211 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-logs" (OuterVolumeSpecName: "logs") pod "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" (UID: "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.564260 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-kube-api-access-lqgf6" (OuterVolumeSpecName: "kube-api-access-lqgf6") pod "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" (UID: "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0"). InnerVolumeSpecName "kube-api-access-lqgf6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.574480 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" (UID: "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.616942 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-config-data" (OuterVolumeSpecName: "config-data") pod "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" (UID: "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.629404 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-scripts" (OuterVolumeSpecName: "scripts") pod "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" (UID: "4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.653097 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7b8cb6d54f-fqmgs"
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.659880 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-logs\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.659907 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.659918 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqgf6\" (UniqueName: \"kubernetes.io/projected/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-kube-api-access-lqgf6\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.659928 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.659963 4943 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.761521 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-scripts\") pod \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.761618 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-horizon-secret-key\") pod \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.761693 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-logs\") pod \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.761718 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lftgl\" (UniqueName: \"kubernetes.io/projected/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-kube-api-access-lftgl\") pod \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.761753 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-config-data\") pod \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\" (UID: \"20f0eeca-d432-4c9a-9e82-6cd6527e8d65\") "
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.762478 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-logs" (OuterVolumeSpecName: "logs") pod "20f0eeca-d432-4c9a-9e82-6cd6527e8d65" (UID: "20f0eeca-d432-4c9a-9e82-6cd6527e8d65"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.766157 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "20f0eeca-d432-4c9a-9e82-6cd6527e8d65" (UID: "20f0eeca-d432-4c9a-9e82-6cd6527e8d65"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.767345 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-kube-api-access-lftgl" (OuterVolumeSpecName: "kube-api-access-lftgl") pod "20f0eeca-d432-4c9a-9e82-6cd6527e8d65" (UID: "20f0eeca-d432-4c9a-9e82-6cd6527e8d65"). InnerVolumeSpecName "kube-api-access-lftgl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.800418 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-config-data" (OuterVolumeSpecName: "config-data") pod "20f0eeca-d432-4c9a-9e82-6cd6527e8d65" (UID: "20f0eeca-d432-4c9a-9e82-6cd6527e8d65"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.809930 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-scripts" (OuterVolumeSpecName: "scripts") pod "20f0eeca-d432-4c9a-9e82-6cd6527e8d65" (UID: "20f0eeca-d432-4c9a-9e82-6cd6527e8d65"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.863398 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.863596 4943 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.863656 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-logs\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.863707 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lftgl\" (UniqueName: \"kubernetes.io/projected/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-kube-api-access-lftgl\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.863794 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/20f0eeca-d432-4c9a-9e82-6cd6527e8d65-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.876935 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 29 07:52:22 crc kubenswrapper[4943]: W1129 07:52:22.884535 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod128c6e68_82d5_4b0b_9734_3f86bd29385e.slice/crio-cf27c5f04a1591353b3b2b570a5eb63e02dbe120c4bce6e12572e9197be06906 WatchSource:0}: Error finding container cf27c5f04a1591353b3b2b570a5eb63e02dbe120c4bce6e12572e9197be06906: Status 404 returned error can't find the container with id cf27c5f04a1591353b3b2b570a5eb63e02dbe120c4bce6e12572e9197be06906
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.904502 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0"
Nov 29 07:52:22 crc kubenswrapper[4943]: I1129 07:52:22.993397 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76b5fdb995-mtv4l"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.051230 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fr7bp"]
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.051457 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp" podUID="cf023768-5d6d-4f46-bf87-b675b8a32480" containerName="dnsmasq-dns" containerID="cri-o://9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a" gracePeriod=10
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.209770 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7b8cb6d54f-fqmgs" event={"ID":"20f0eeca-d432-4c9a-9e82-6cd6527e8d65","Type":"ContainerDied","Data":"e885e6367b3cd5783763ad9db390101c0141ab3f8cc17ac55b737b059e4c16f9"}
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.210142 4943 scope.go:117] "RemoveContainer" containerID="29061c6fe75550b28c46c503b1a3d44230251766c3da812d44aa087bb7e7b9b3"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.210350 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7b8cb6d54f-fqmgs"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.216387 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"b5dbf0d5-a834-4366-8d63-6d2672446171","Type":"ContainerStarted","Data":"38af846b30c378c5b32c12852a8287abceee226f84a91fb3c05aa782ac8d348d"}
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.217728 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"128c6e68-82d5-4b0b-9734-3f86bd29385e","Type":"ContainerStarted","Data":"cf27c5f04a1591353b3b2b570a5eb63e02dbe120c4bce6e12572e9197be06906"}
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.226027 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bfc5c9977-ggjqb" event={"ID":"4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0","Type":"ContainerDied","Data":"467a7ff4cdadcb619e32aee2bee955c9be4f5a1b0d3923ade2b635c25cc11326"}
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.226120 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bfc5c9977-ggjqb"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.332601 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4"
Nov 29 07:52:23 crc kubenswrapper[4943]: E1129 07:52:23.333053 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.373166 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7bfc5c9977-ggjqb"]
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.373206 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7bfc5c9977-ggjqb"]
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.385325 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7b8cb6d54f-fqmgs"]
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.392656 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7b8cb6d54f-fqmgs"]
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.454916 4943 scope.go:117] "RemoveContainer" containerID="9885765388ac8e11b4aad70a64c50902a95703ee2b90eba6d6a0f4ba710bafde"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.645786 4943 scope.go:117] "RemoveContainer" containerID="f302715b8d86ae30e1622c562fd6fdd51d0bb55c634282f60277b11de2219a75"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.660710 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.786745 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-sb\") pod \"cf023768-5d6d-4f46-bf87-b675b8a32480\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.786923 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-dns-svc\") pod \"cf023768-5d6d-4f46-bf87-b675b8a32480\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.787114 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-nb\") pod \"cf023768-5d6d-4f46-bf87-b675b8a32480\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.787191 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-openstack-edpm-ipam\") pod \"cf023768-5d6d-4f46-bf87-b675b8a32480\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.787266 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-config\") pod \"cf023768-5d6d-4f46-bf87-b675b8a32480\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.787352 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lpz9\" (UniqueName: \"kubernetes.io/projected/cf023768-5d6d-4f46-bf87-b675b8a32480-kube-api-access-2lpz9\") pod \"cf023768-5d6d-4f46-bf87-b675b8a32480\" (UID: \"cf023768-5d6d-4f46-bf87-b675b8a32480\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.796284 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf023768-5d6d-4f46-bf87-b675b8a32480-kube-api-access-2lpz9" (OuterVolumeSpecName: "kube-api-access-2lpz9") pod "cf023768-5d6d-4f46-bf87-b675b8a32480" (UID: "cf023768-5d6d-4f46-bf87-b675b8a32480"). InnerVolumeSpecName "kube-api-access-2lpz9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.837685 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cf023768-5d6d-4f46-bf87-b675b8a32480" (UID: "cf023768-5d6d-4f46-bf87-b675b8a32480"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.848177 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cf023768-5d6d-4f46-bf87-b675b8a32480" (UID: "cf023768-5d6d-4f46-bf87-b675b8a32480"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.849111 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "cf023768-5d6d-4f46-bf87-b675b8a32480" (UID: "cf023768-5d6d-4f46-bf87-b675b8a32480"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.852961 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cf023768-5d6d-4f46-bf87-b675b8a32480" (UID: "cf023768-5d6d-4f46-bf87-b675b8a32480"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.855224 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-config" (OuterVolumeSpecName: "config") pod "cf023768-5d6d-4f46-bf87-b675b8a32480" (UID: "cf023768-5d6d-4f46-bf87-b675b8a32480"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.883901 4943 scope.go:117] "RemoveContainer" containerID="c540d975e2a3e06617f39b2826d11f127dabea884857f3d8ffeada387f983a29"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.889907 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.889927 4943 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.889937 4943 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-config\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.889946 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lpz9\" (UniqueName: \"kubernetes.io/projected/cf023768-5d6d-4f46-bf87-b675b8a32480-kube-api-access-2lpz9\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.889954 4943 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.889964 4943 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf023768-5d6d-4f46-bf87-b675b8a32480-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.904527 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.990744 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-log-httpd\") pod \"6314bf60-e362-40ed-aee3-b35fd22cac1d\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.990795 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-run-httpd\") pod \"6314bf60-e362-40ed-aee3-b35fd22cac1d\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.990852 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-scripts\") pod \"6314bf60-e362-40ed-aee3-b35fd22cac1d\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.990953 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2c2m2\" (UniqueName: \"kubernetes.io/projected/6314bf60-e362-40ed-aee3-b35fd22cac1d-kube-api-access-2c2m2\") pod \"6314bf60-e362-40ed-aee3-b35fd22cac1d\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.991031 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-combined-ca-bundle\") pod \"6314bf60-e362-40ed-aee3-b35fd22cac1d\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.991073 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-config-data\") pod \"6314bf60-e362-40ed-aee3-b35fd22cac1d\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.991244 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-sg-core-conf-yaml\") pod \"6314bf60-e362-40ed-aee3-b35fd22cac1d\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.991293 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-ceilometer-tls-certs\") pod \"6314bf60-e362-40ed-aee3-b35fd22cac1d\" (UID: \"6314bf60-e362-40ed-aee3-b35fd22cac1d\") "
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.993144 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6314bf60-e362-40ed-aee3-b35fd22cac1d" (UID: "6314bf60-e362-40ed-aee3-b35fd22cac1d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.993710 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6314bf60-e362-40ed-aee3-b35fd22cac1d" (UID: "6314bf60-e362-40ed-aee3-b35fd22cac1d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 07:52:23 crc kubenswrapper[4943]: I1129 07:52:23.997067 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6314bf60-e362-40ed-aee3-b35fd22cac1d-kube-api-access-2c2m2" (OuterVolumeSpecName: "kube-api-access-2c2m2") pod "6314bf60-e362-40ed-aee3-b35fd22cac1d" (UID: "6314bf60-e362-40ed-aee3-b35fd22cac1d"). InnerVolumeSpecName "kube-api-access-2c2m2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.026928 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-scripts" (OuterVolumeSpecName: "scripts") pod "6314bf60-e362-40ed-aee3-b35fd22cac1d" (UID: "6314bf60-e362-40ed-aee3-b35fd22cac1d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.093283 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.093473 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6314bf60-e362-40ed-aee3-b35fd22cac1d" (UID: "6314bf60-e362-40ed-aee3-b35fd22cac1d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.093798 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6314bf60-e362-40ed-aee3-b35fd22cac1d-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.093887 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.093978 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2c2m2\" (UniqueName: \"kubernetes.io/projected/6314bf60-e362-40ed-aee3-b35fd22cac1d-kube-api-access-2c2m2\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.113014 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "6314bf60-e362-40ed-aee3-b35fd22cac1d" (UID: "6314bf60-e362-40ed-aee3-b35fd22cac1d"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.124465 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6314bf60-e362-40ed-aee3-b35fd22cac1d" (UID: "6314bf60-e362-40ed-aee3-b35fd22cac1d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.196035 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.196211 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.196301 4943 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.227055 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-config-data" (OuterVolumeSpecName: "config-data") pod "6314bf60-e362-40ed-aee3-b35fd22cac1d" (UID: "6314bf60-e362-40ed-aee3-b35fd22cac1d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.242994 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"b5dbf0d5-a834-4366-8d63-6d2672446171","Type":"ContainerStarted","Data":"5a83181da6bc7670402bcc10b18772cb2a3ab83db5464026eabeef44314c67d9"}
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.244932 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"128c6e68-82d5-4b0b-9734-3f86bd29385e","Type":"ContainerStarted","Data":"bfcf4bd97736c5450857bc30d121f4ac069505d9fcbc849ba7eb707e3da90477"}
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.244956 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"128c6e68-82d5-4b0b-9734-3f86bd29385e","Type":"ContainerStarted","Data":"ce7b4830b18a3dd9649cff523f9237d7d1e09afa151f547445fab200ae7dd275"}
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.245094 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.247975 4943 generic.go:334] "Generic (PLEG): container finished" podID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerID="cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7" exitCode=0
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.248033 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerDied","Data":"cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7"}
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.248061 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6314bf60-e362-40ed-aee3-b35fd22cac1d","Type":"ContainerDied","Data":"e8961f2e9da2808a5f9efded300af6c607dbddc8191de59bc7550b712bc6285f"}
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.248078 4943 scope.go:117] "RemoveContainer" containerID="d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.248166 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.252402 4943 generic.go:334] "Generic (PLEG): container finished" podID="cf023768-5d6d-4f46-bf87-b675b8a32480" containerID="9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a" exitCode=0
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.252449 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp" event={"ID":"cf023768-5d6d-4f46-bf87-b675b8a32480","Type":"ContainerDied","Data":"9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a"}
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.252476 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp" event={"ID":"cf023768-5d6d-4f46-bf87-b675b8a32480","Type":"ContainerDied","Data":"b758215beaa1c4d2f2f9202646e7131d4c3232aa0787cafb8416efdcf770ad22"}
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.252535 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-fr7bp"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.266748 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.561571323 podStartE2EDuration="12.266712559s" podCreationTimestamp="2025-11-29 07:52:12 +0000 UTC" firstStartedPulling="2025-11-29 07:52:13.545786322 +0000 UTC m=+4708.475875075" lastFinishedPulling="2025-11-29 07:52:22.250927558 +0000 UTC m=+4717.181016311" observedRunningTime="2025-11-29 07:52:24.263345677 +0000 UTC m=+4719.193434430" watchObservedRunningTime="2025-11-29 07:52:24.266712559 +0000 UTC m=+4719.196801312"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.297934 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=6.297913429 podStartE2EDuration="6.297913429s" podCreationTimestamp="2025-11-29 07:52:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:52:24.289666748 +0000 UTC m=+4719.219755531" watchObservedRunningTime="2025-11-29 07:52:24.297913429 +0000 UTC m=+4719.228002182"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.298077 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6314bf60-e362-40ed-aee3-b35fd22cac1d-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.322480 4943 scope.go:117] "RemoveContainer" containerID="4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.361725 4943 scope.go:117] "RemoveContainer" containerID="cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.361857 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fr7bp"]
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.371696 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fr7bp"]
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.378984 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.383711 4943 scope.go:117] "RemoveContainer" containerID="b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.385944 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393151 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393706 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerName="horizon-log"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393727 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerName="horizon-log"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393747 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf023768-5d6d-4f46-bf87-b675b8a32480" containerName="init"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393759 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf023768-5d6d-4f46-bf87-b675b8a32480" containerName="init"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393776 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="sg-core"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393784 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="sg-core"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393807 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerName="horizon"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393816 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerName="horizon"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393828 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="proxy-httpd"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393836 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="proxy-httpd"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393857 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerName="horizon-log"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393865 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerName="horizon-log"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393891 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="ceilometer-central-agent"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393899 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="ceilometer-central-agent"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393915 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="ceilometer-notification-agent"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393925 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="ceilometer-notification-agent"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393947 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerName="horizon"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393957 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerName="horizon"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.393969 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf023768-5d6d-4f46-bf87-b675b8a32480" containerName="dnsmasq-dns"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.393978 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf023768-5d6d-4f46-bf87-b675b8a32480" containerName="dnsmasq-dns"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394226 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerName="horizon-log"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394258 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" containerName="horizon"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394276 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="ceilometer-notification-agent"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394285 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf023768-5d6d-4f46-bf87-b675b8a32480" containerName="dnsmasq-dns"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394300 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="proxy-httpd"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394315 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="ceilometer-central-agent"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394324 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerName="horizon"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394338 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" containerName="sg-core"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.394356 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" containerName="horizon-log"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.398341 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.399573 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.402633 4943 scope.go:117] "RemoveContainer" containerID="d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.402851 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.402866 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.403006 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.428159 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16\": container with ID starting with d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16 not found: ID does not exist" containerID="d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.428199 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16"} err="failed to get container status \"d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16\": rpc error: code = NotFound desc = could not find container \"d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16\": container with ID starting with d533fe7d9d684405b4afe9a5965f4796c730b400a589f61164e07d821641da16 not found: ID does not exist"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.428226 4943 scope.go:117] "RemoveContainer" containerID="4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.429247 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803\": container with ID starting with 4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803 not found: ID does not exist" containerID="4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.429293 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803"} err="failed to get container status \"4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803\": rpc error: code = NotFound desc = could not find container \"4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803\": container with ID starting with 4f8344bffd64d187c7aeeca7f7dfcf035a4b26df5d9c779fc1b4c4099acb7803 not found: ID does not exist"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.429322 4943 scope.go:117] "RemoveContainer" containerID="cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.432400 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7\": container with ID starting with cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7 not found: ID does not exist" containerID="cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.432425 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7"} err="failed to get container status \"cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7\": rpc error: code = NotFound desc = could not find container \"cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7\": container with ID starting with cf9dc81aa597cc22d6b0d231cd8f7cb9da5ce754b6a3d03b43eb5cd3743ad3e7 not found: ID does not exist"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.432441 4943 scope.go:117] "RemoveContainer" containerID="b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.432726 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6\": container with ID starting with b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6 not found: ID does not exist" containerID="b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.432745 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6"} err="failed to get container status \"b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6\": rpc error: code = NotFound desc = could not find container \"b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6\": container with ID starting with b4ee8573786e61e3879d836e79472989f54f73a589aa75d857c17b76df97cbf6 not found: ID does not exist"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.432757 4943 scope.go:117] "RemoveContainer" containerID="9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.488324 4943 scope.go:117] "RemoveContainer" containerID="46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.502229 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.502283 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-run-httpd\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.502309 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.502370 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b92tk\" (UniqueName: \"kubernetes.io/projected/06eac98e-b132-4399-86a5-4e75d1df812c-kube-api-access-b92tk\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.502830 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-log-httpd\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.502984 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.503024 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-config-data\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.503051 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-scripts\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.568469 4943 scope.go:117] "RemoveContainer" containerID="9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.573950 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a\": container with ID starting with 9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a not found: ID does not exist" containerID="9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.573990 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a"} err="failed to get container status \"9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a\": rpc error: code = NotFound desc = could not find container \"9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a\": container with ID starting with 9a834f8b3ff6f063221aa22b51e2bd18f627cc77f6f10ac7f169bf9ae77adb6a not found: ID does not exist"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.574017 4943 scope.go:117] "RemoveContainer" containerID="46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7"
Nov 29 07:52:24 crc kubenswrapper[4943]: E1129 07:52:24.574369 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7\": container with ID starting with 46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7 not found: ID does not exist" containerID="46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.574415 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7"} err="failed to get container status \"46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7\": rpc error: code = NotFound desc = could not find container \"46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7\": container with ID starting with 46054cacbee734a421d82aad4663c9f286df729912461f58c2d308678e4ea5b7 not found: ID does not exist"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.605846 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.606135 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-config-data\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.606295 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-scripts\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.606448 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.607051 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-run-httpd\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.607608 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.607822 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b92tk\" (UniqueName: \"kubernetes.io/projected/06eac98e-b132-4399-86a5-4e75d1df812c-kube-api-access-b92tk\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.608009 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-log-httpd\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.607398 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-run-httpd\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.608513 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-log-httpd\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.610202 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-config-data\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.610416 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.614734 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-scripts\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.615957 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.616198 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.623054 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b92tk\" (UniqueName: \"kubernetes.io/projected/06eac98e-b132-4399-86a5-4e75d1df812c-kube-api-access-b92tk\") pod \"ceilometer-0\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " pod="openstack/ceilometer-0"
Nov 29 07:52:24 crc kubenswrapper[4943]: I1129 07:52:24.768759 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 07:52:25 crc kubenswrapper[4943]: I1129 07:52:25.263193 4943 generic.go:334] "Generic (PLEG): container finished" podID="2401492d-b549-43d0-988e-4d1235af15cf" containerID="f57733cbd771d9b14f6636e01a0b1b3660f074cc9c1d32d021e83c7a572f150a" exitCode=0
Nov 29 07:52:25 crc kubenswrapper[4943]: I1129 07:52:25.263289 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5897fd8cd4-cfjb4" event={"ID":"2401492d-b549-43d0-988e-4d1235af15cf","Type":"ContainerDied","Data":"f57733cbd771d9b14f6636e01a0b1b3660f074cc9c1d32d021e83c7a572f150a"}
Nov 29 07:52:25 crc kubenswrapper[4943]: I1129 07:52:25.326825 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 07:52:25 crc kubenswrapper[4943]: W1129 07:52:25.329324 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06eac98e_b132_4399_86a5_4e75d1df812c.slice/crio-73b6baedbf305347e8f5f742b01bc84454320bb571c465c8b216714e44ee6546 WatchSource:0}: Error finding container 73b6baedbf305347e8f5f742b01bc84454320bb571c465c8b216714e44ee6546: Status 404 returned error can't find the container with id 73b6baedbf305347e8f5f742b01bc84454320bb571c465c8b216714e44ee6546
Nov 29 07:52:25 crc kubenswrapper[4943]: I1129 07:52:25.340448 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20f0eeca-d432-4c9a-9e82-6cd6527e8d65" path="/var/lib/kubelet/pods/20f0eeca-d432-4c9a-9e82-6cd6527e8d65/volumes"
Nov 29 07:52:25 crc kubenswrapper[4943]: I1129 07:52:25.341293 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0" path="/var/lib/kubelet/pods/4a57a9eb-7066-4bba-a3d4-b9b235dd5fd0/volumes"
Nov 29 07:52:25 crc kubenswrapper[4943]: I1129 07:52:25.341888 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6314bf60-e362-40ed-aee3-b35fd22cac1d" path="/var/lib/kubelet/pods/6314bf60-e362-40ed-aee3-b35fd22cac1d/volumes"
Nov 29 07:52:25 crc kubenswrapper[4943]: I1129 07:52:25.343138 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf023768-5d6d-4f46-bf87-b675b8a32480" path="/var/lib/kubelet/pods/cf023768-5d6d-4f46-bf87-b675b8a32480/volumes"
Nov 29 07:52:26 crc kubenswrapper[4943]: I1129 07:52:26.277135 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerStarted","Data":"9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496"}
Nov 29 07:52:26 crc kubenswrapper[4943]: I1129 07:52:26.277177 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerStarted","Data":"73b6baedbf305347e8f5f742b01bc84454320bb571c465c8b216714e44ee6546"}
Nov 29 07:52:26 crc kubenswrapper[4943]: I1129 07:52:26.469808 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 07:52:26 crc kubenswrapper[4943]: I1129 07:52:26.548527 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5897fd8cd4-cfjb4" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.0:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.0:8443: connect: connection refused"
Nov 29 07:52:27 crc kubenswrapper[4943]: I1129 07:52:27.289685 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerStarted","Data":"4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10"}
Nov 29 07:52:28 crc kubenswrapper[4943]: I1129 07:52:28.300586 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerStarted","Data":"09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7"}
Nov 29 07:52:30 crc kubenswrapper[4943]: I1129 07:52:30.339540 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerStarted","Data":"727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0"}
Nov 29 07:52:30 crc kubenswrapper[4943]: I1129 07:52:30.340109 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 29 07:52:30 crc kubenswrapper[4943]: I1129 07:52:30.339988 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="proxy-httpd" containerID="cri-o://727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0" gracePeriod=30
Nov 29 07:52:30 crc kubenswrapper[4943]: I1129 07:52:30.339931 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="ceilometer-central-agent" containerID="cri-o://9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496" gracePeriod=30
Nov 29 07:52:30 crc kubenswrapper[4943]: I1129 07:52:30.339991 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="sg-core" containerID="cri-o://09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7" gracePeriod=30
Nov 29 07:52:30 crc kubenswrapper[4943]: I1129 07:52:30.339979 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="ceilometer-notification-agent" containerID="cri-o://4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10" gracePeriod=30
Nov 29 07:52:30 crc kubenswrapper[4943]: I1129 07:52:30.381912 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.455779131 podStartE2EDuration="6.381892413s" podCreationTimestamp="2025-11-29 07:52:24 +0000 UTC" firstStartedPulling="2025-11-29 07:52:25.33167644 +0000 UTC m=+4720.261765193" lastFinishedPulling="2025-11-29 07:52:29.257789732 +0000 UTC m=+4724.187878475" observedRunningTime="2025-11-29 07:52:30.375753714 +0000 UTC m=+4725.305842467" watchObservedRunningTime="2025-11-29 07:52:30.381892413 +0000 UTC m=+4725.311981166"
Nov 29 07:52:31 crc kubenswrapper[4943]: I1129 07:52:31.350099 4943 generic.go:334] "Generic (PLEG): container finished" podID="06eac98e-b132-4399-86a5-4e75d1df812c" containerID="727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0" exitCode=0
Nov 29 07:52:31 crc kubenswrapper[4943]: I1129 07:52:31.350355 4943 generic.go:334] "Generic (PLEG): container finished" podID="06eac98e-b132-4399-86a5-4e75d1df812c" containerID="09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7" exitCode=2
Nov 29 07:52:31 crc kubenswrapper[4943]: I1129 07:52:31.350363 4943 generic.go:334] "Generic (PLEG): container finished" podID="06eac98e-b132-4399-86a5-4e75d1df812c" containerID="4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10" exitCode=0
Nov 29 07:52:31 crc kubenswrapper[4943]: I1129 07:52:31.350199 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerDied","Data":"727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0"}
Nov 29 07:52:31 crc kubenswrapper[4943]: I1129 07:52:31.350391 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerDied","Data":"09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7"}
Nov 29 07:52:31 crc kubenswrapper[4943]: I1129 07:52:31.350402 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerDied","Data":"4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10"}
Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.158207 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.278354 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-ceilometer-tls-certs\") pod \"06eac98e-b132-4399-86a5-4e75d1df812c\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") "
Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.278414 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-combined-ca-bundle\") pod \"06eac98e-b132-4399-86a5-4e75d1df812c\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") "
Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.278517 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-config-data\") pod \"06eac98e-b132-4399-86a5-4e75d1df812c\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") "
Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.278604 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-log-httpd\") pod \"06eac98e-b132-4399-86a5-4e75d1df812c\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") "
Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.278661 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-run-httpd\") pod \"06eac98e-b132-4399-86a5-4e75d1df812c\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") "
Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.278694 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b92tk\" (UniqueName: \"kubernetes.io/projected/06eac98e-b132-4399-86a5-4e75d1df812c-kube-api-access-b92tk\") pod \"06eac98e-b132-4399-86a5-4e75d1df812c\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") "
Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.278730 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName:
\"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-scripts\") pod \"06eac98e-b132-4399-86a5-4e75d1df812c\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.278761 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-sg-core-conf-yaml\") pod \"06eac98e-b132-4399-86a5-4e75d1df812c\" (UID: \"06eac98e-b132-4399-86a5-4e75d1df812c\") " Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.280167 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "06eac98e-b132-4399-86a5-4e75d1df812c" (UID: "06eac98e-b132-4399-86a5-4e75d1df812c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.281012 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "06eac98e-b132-4399-86a5-4e75d1df812c" (UID: "06eac98e-b132-4399-86a5-4e75d1df812c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.285308 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06eac98e-b132-4399-86a5-4e75d1df812c-kube-api-access-b92tk" (OuterVolumeSpecName: "kube-api-access-b92tk") pod "06eac98e-b132-4399-86a5-4e75d1df812c" (UID: "06eac98e-b132-4399-86a5-4e75d1df812c"). InnerVolumeSpecName "kube-api-access-b92tk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.302710 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-scripts" (OuterVolumeSpecName: "scripts") pod "06eac98e-b132-4399-86a5-4e75d1df812c" (UID: "06eac98e-b132-4399-86a5-4e75d1df812c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.319040 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "06eac98e-b132-4399-86a5-4e75d1df812c" (UID: "06eac98e-b132-4399-86a5-4e75d1df812c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.342069 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "06eac98e-b132-4399-86a5-4e75d1df812c" (UID: "06eac98e-b132-4399-86a5-4e75d1df812c"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.362947 4943 generic.go:334] "Generic (PLEG): container finished" podID="06eac98e-b132-4399-86a5-4e75d1df812c" containerID="9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496" exitCode=0 Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.363026 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerDied","Data":"9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496"} Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.363059 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"06eac98e-b132-4399-86a5-4e75d1df812c","Type":"ContainerDied","Data":"73b6baedbf305347e8f5f742b01bc84454320bb571c465c8b216714e44ee6546"} Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.363082 4943 scope.go:117] "RemoveContainer" containerID="727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.363260 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.372785 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06eac98e-b132-4399-86a5-4e75d1df812c" (UID: "06eac98e-b132-4399-86a5-4e75d1df812c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.381498 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b92tk\" (UniqueName: \"kubernetes.io/projected/06eac98e-b132-4399-86a5-4e75d1df812c-kube-api-access-b92tk\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.381541 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.381554 4943 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.381585 4943 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.381598 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.381610 4943 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.381622 4943 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06eac98e-b132-4399-86a5-4e75d1df812c-run-httpd\") on node \"crc\" 
DevicePath \"\"" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.390516 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-config-data" (OuterVolumeSpecName: "config-data") pod "06eac98e-b132-4399-86a5-4e75d1df812c" (UID: "06eac98e-b132-4399-86a5-4e75d1df812c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.404323 4943 scope.go:117] "RemoveContainer" containerID="09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.422676 4943 scope.go:117] "RemoveContainer" containerID="4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.440821 4943 scope.go:117] "RemoveContainer" containerID="9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.459490 4943 scope.go:117] "RemoveContainer" containerID="727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0" Nov 29 07:52:32 crc kubenswrapper[4943]: E1129 07:52:32.459939 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0\": container with ID starting with 727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0 not found: ID does not exist" containerID="727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.459978 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0"} err="failed to get container status \"727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0\": rpc error: code = NotFound desc = could not find container \"727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0\": container with ID starting with 727c27a8cd098fd37a4f2438c9ded552064e016e43748a5bf5475b1f79cabfa0 not found: ID does not exist" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.460001 4943 scope.go:117] "RemoveContainer" containerID="09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7" Nov 29 07:52:32 crc kubenswrapper[4943]: E1129 07:52:32.460395 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7\": container with ID starting with 09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7 not found: ID does not exist" containerID="09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.460419 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7"} err="failed to get container status \"09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7\": rpc error: code = NotFound desc = could not find container \"09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7\": container with ID starting with 09ed8d2d9db7d05f1ee05d40465a26b4fc3aa6559302d662bad1575d59aeaab7 not found: ID does not exist" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.460435 4943 scope.go:117] "RemoveContainer" 
containerID="4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10" Nov 29 07:52:32 crc kubenswrapper[4943]: E1129 07:52:32.460803 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10\": container with ID starting with 4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10 not found: ID does not exist" containerID="4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.460842 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10"} err="failed to get container status \"4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10\": rpc error: code = NotFound desc = could not find container \"4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10\": container with ID starting with 4613eaad55124ce50144e29983606b4793e1dbf75cb98921ca38725f269a9b10 not found: ID does not exist" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.460870 4943 scope.go:117] "RemoveContainer" containerID="9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496" Nov 29 07:52:32 crc kubenswrapper[4943]: E1129 07:52:32.461215 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496\": container with ID starting with 9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496 not found: ID does not exist" containerID="9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.461246 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496"} err="failed to get container status \"9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496\": rpc error: code = NotFound desc = could not find container \"9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496\": container with ID starting with 9ec46aecc2461977726ecee3bb298d734125d4544b90c5e7a7712d1e524c1496 not found: ID does not exist" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.483494 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06eac98e-b132-4399-86a5-4e75d1df812c-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.702837 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.712302 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.738885 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:52:32 crc kubenswrapper[4943]: E1129 07:52:32.739682 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="sg-core" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.739712 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="sg-core" Nov 29 07:52:32 crc kubenswrapper[4943]: E1129 07:52:32.739745 4943 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="ceilometer-notification-agent" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.739755 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="ceilometer-notification-agent" Nov 29 07:52:32 crc kubenswrapper[4943]: E1129 07:52:32.739772 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="proxy-httpd" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.739780 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="proxy-httpd" Nov 29 07:52:32 crc kubenswrapper[4943]: E1129 07:52:32.739812 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="ceilometer-central-agent" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.739820 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="ceilometer-central-agent" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.740059 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="ceilometer-notification-agent" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.740080 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="sg-core" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.740096 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="proxy-httpd" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.740165 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" containerName="ceilometer-central-agent" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.743596 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.749109 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.749509 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.749739 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.759782 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.788402 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.788462 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-config-data\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.788516 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/969ecbf0-186b-4121-b56a-998af3fc2e9e-log-httpd\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.788630 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btgjz\" (UniqueName: \"kubernetes.io/projected/969ecbf0-186b-4121-b56a-998af3fc2e9e-kube-api-access-btgjz\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.788710 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.788731 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-scripts\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.788769 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.788868 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/969ecbf0-186b-4121-b56a-998af3fc2e9e-run-httpd\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.861816 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.890946 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/969ecbf0-186b-4121-b56a-998af3fc2e9e-run-httpd\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.891439 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.891386 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/969ecbf0-186b-4121-b56a-998af3fc2e9e-run-httpd\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.891530 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-config-data\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.891703 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/969ecbf0-186b-4121-b56a-998af3fc2e9e-log-httpd\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.891789 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btgjz\" (UniqueName: \"kubernetes.io/projected/969ecbf0-186b-4121-b56a-998af3fc2e9e-kube-api-access-btgjz\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.891863 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.891892 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-scripts\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.891925 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc 
kubenswrapper[4943]: I1129 07:52:32.892849 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/969ecbf0-186b-4121-b56a-998af3fc2e9e-log-httpd\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.895229 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.895891 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.906287 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-scripts\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.906835 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-config-data\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.908304 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/969ecbf0-186b-4121-b56a-998af3fc2e9e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:32 crc kubenswrapper[4943]: I1129 07:52:32.918630 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btgjz\" (UniqueName: \"kubernetes.io/projected/969ecbf0-186b-4121-b56a-998af3fc2e9e-kube-api-access-btgjz\") pod \"ceilometer-0\" (UID: \"969ecbf0-186b-4121-b56a-998af3fc2e9e\") " pod="openstack/ceilometer-0" Nov 29 07:52:33 crc kubenswrapper[4943]: I1129 07:52:33.068969 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 07:52:33 crc kubenswrapper[4943]: I1129 07:52:33.339117 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06eac98e-b132-4399-86a5-4e75d1df812c" path="/var/lib/kubelet/pods/06eac98e-b132-4399-86a5-4e75d1df812c/volumes" Nov 29 07:52:33 crc kubenswrapper[4943]: I1129 07:52:33.591205 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 07:52:33 crc kubenswrapper[4943]: W1129 07:52:33.602758 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod969ecbf0_186b_4121_b56a_998af3fc2e9e.slice/crio-ac9958a14b054400e34f44c191c3fbcc7ebead307e833cfe5b9ecf33c70ab169 WatchSource:0}: Error finding container ac9958a14b054400e34f44c191c3fbcc7ebead307e833cfe5b9ecf33c70ab169: Status 404 returned error can't find the container with id ac9958a14b054400e34f44c191c3fbcc7ebead307e833cfe5b9ecf33c70ab169 Nov 29 07:52:34 crc kubenswrapper[4943]: I1129 07:52:34.328659 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:52:34 crc kubenswrapper[4943]: E1129 07:52:34.329235 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:52:34 crc kubenswrapper[4943]: I1129 07:52:34.375273 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 29 07:52:34 crc kubenswrapper[4943]: I1129 07:52:34.384295 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"969ecbf0-186b-4121-b56a-998af3fc2e9e","Type":"ContainerStarted","Data":"e6e4ac0d0eff7bbacc8868a7f424272ee82ee415e0790e48cca12b082ada71ef"} Nov 29 07:52:34 crc kubenswrapper[4943]: I1129 07:52:34.384335 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"969ecbf0-186b-4121-b56a-998af3fc2e9e","Type":"ContainerStarted","Data":"ac9958a14b054400e34f44c191c3fbcc7ebead307e833cfe5b9ecf33c70ab169"} Nov 29 07:52:34 crc kubenswrapper[4943]: I1129 07:52:34.436650 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 29 07:52:34 crc kubenswrapper[4943]: I1129 07:52:34.465810 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 29 07:52:34 crc kubenswrapper[4943]: I1129 07:52:34.518980 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 29 07:52:35 crc kubenswrapper[4943]: I1129 07:52:35.393948 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerName="manila-share" containerID="cri-o://38af846b30c378c5b32c12852a8287abceee226f84a91fb3c05aa782ac8d348d" gracePeriod=30 Nov 29 07:52:35 crc kubenswrapper[4943]: I1129 07:52:35.394481 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"969ecbf0-186b-4121-b56a-998af3fc2e9e","Type":"ContainerStarted","Data":"137c382c0c5c64aa77deeed4857c89e0517cf1d950731096142aab688eaa6038"} Nov 29 07:52:35 crc kubenswrapper[4943]: I1129 07:52:35.394633 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerName="manila-scheduler" containerID="cri-o://501976e6a8d58428e202b991df1b1501a92c9b75aee73624a132e4fa89d19d8b" gracePeriod=30 Nov 29 07:52:35 crc kubenswrapper[4943]: I1129 07:52:35.394920 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerName="probe" containerID="cri-o://5a83181da6bc7670402bcc10b18772cb2a3ab83db5464026eabeef44314c67d9" gracePeriod=30 Nov 29 07:52:35 crc kubenswrapper[4943]: I1129 07:52:35.394976 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerName="probe" containerID="cri-o://93789fd41c1636f91afe1d181bd5590dcf8d91b83219bab82782f916a92c687d" gracePeriod=30 Nov 29 07:52:36 crc kubenswrapper[4943]: I1129 07:52:36.424891 4943 generic.go:334] "Generic (PLEG): container finished" podID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerID="93789fd41c1636f91afe1d181bd5590dcf8d91b83219bab82782f916a92c687d" exitCode=0 Nov 29 07:52:36 crc kubenswrapper[4943]: I1129 07:52:36.424974 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57","Type":"ContainerDied","Data":"93789fd41c1636f91afe1d181bd5590dcf8d91b83219bab82782f916a92c687d"} Nov 29 07:52:36 crc kubenswrapper[4943]: I1129 07:52:36.548190 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5897fd8cd4-cfjb4" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.0:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.0:8443: connect: connection refused" Nov 29 07:52:38 crc kubenswrapper[4943]: I1129 07:52:38.447744 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"969ecbf0-186b-4121-b56a-998af3fc2e9e","Type":"ContainerStarted","Data":"1eb543b62b4ca2277a07f022dc83c652f6be36abdd9ec9cefe2266fe92e594bc"} Nov 29 07:52:38 crc kubenswrapper[4943]: I1129 07:52:38.450426 4943 generic.go:334] "Generic (PLEG): container finished" podID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerID="5a83181da6bc7670402bcc10b18772cb2a3ab83db5464026eabeef44314c67d9" exitCode=0 Nov 29 07:52:38 crc kubenswrapper[4943]: I1129 07:52:38.450463 4943 generic.go:334] "Generic (PLEG): container finished" podID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerID="38af846b30c378c5b32c12852a8287abceee226f84a91fb3c05aa782ac8d348d" exitCode=1 Nov 29 07:52:38 crc kubenswrapper[4943]: I1129 07:52:38.450478 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"b5dbf0d5-a834-4366-8d63-6d2672446171","Type":"ContainerDied","Data":"5a83181da6bc7670402bcc10b18772cb2a3ab83db5464026eabeef44314c67d9"} Nov 29 07:52:38 crc kubenswrapper[4943]: I1129 07:52:38.450504 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" 
event={"ID":"b5dbf0d5-a834-4366-8d63-6d2672446171","Type":"ContainerDied","Data":"38af846b30c378c5b32c12852a8287abceee226f84a91fb3c05aa782ac8d348d"} Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.287541 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366270 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data\") pod \"b5dbf0d5-a834-4366-8d63-6d2672446171\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366380 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-var-lib-manila\") pod \"b5dbf0d5-a834-4366-8d63-6d2672446171\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366458 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-scripts\") pod \"b5dbf0d5-a834-4366-8d63-6d2672446171\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366525 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-etc-machine-id\") pod \"b5dbf0d5-a834-4366-8d63-6d2672446171\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366612 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data-custom\") pod \"b5dbf0d5-a834-4366-8d63-6d2672446171\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366660 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xjwr\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-kube-api-access-4xjwr\") pod \"b5dbf0d5-a834-4366-8d63-6d2672446171\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366718 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-combined-ca-bundle\") pod \"b5dbf0d5-a834-4366-8d63-6d2672446171\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366786 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-ceph\") pod \"b5dbf0d5-a834-4366-8d63-6d2672446171\" (UID: \"b5dbf0d5-a834-4366-8d63-6d2672446171\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366835 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b5dbf0d5-a834-4366-8d63-6d2672446171" (UID: "b5dbf0d5-a834-4366-8d63-6d2672446171"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.366902 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "b5dbf0d5-a834-4366-8d63-6d2672446171" (UID: "b5dbf0d5-a834-4366-8d63-6d2672446171"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.367290 4943 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.367312 4943 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5dbf0d5-a834-4366-8d63-6d2672446171-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.396222 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-scripts" (OuterVolumeSpecName: "scripts") pod "b5dbf0d5-a834-4366-8d63-6d2672446171" (UID: "b5dbf0d5-a834-4366-8d63-6d2672446171"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.403371 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b5dbf0d5-a834-4366-8d63-6d2672446171" (UID: "b5dbf0d5-a834-4366-8d63-6d2672446171"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.406807 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-kube-api-access-4xjwr" (OuterVolumeSpecName: "kube-api-access-4xjwr") pod "b5dbf0d5-a834-4366-8d63-6d2672446171" (UID: "b5dbf0d5-a834-4366-8d63-6d2672446171"). InnerVolumeSpecName "kube-api-access-4xjwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.406936 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-ceph" (OuterVolumeSpecName: "ceph") pod "b5dbf0d5-a834-4366-8d63-6d2672446171" (UID: "b5dbf0d5-a834-4366-8d63-6d2672446171"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.471088 4943 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.471119 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xjwr\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-kube-api-access-4xjwr\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.471133 4943 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b5dbf0d5-a834-4366-8d63-6d2672446171-ceph\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.471144 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.482136 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"b5dbf0d5-a834-4366-8d63-6d2672446171","Type":"ContainerDied","Data":"86317e3c0c2e45cb05b9ce44c7cbfff5232efb8bd9046a0a29a051a6a04e6821"} Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.482186 4943 scope.go:117] "RemoveContainer" containerID="5a83181da6bc7670402bcc10b18772cb2a3ab83db5464026eabeef44314c67d9" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.482298 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.508999 4943 generic.go:334] "Generic (PLEG): container finished" podID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerID="501976e6a8d58428e202b991df1b1501a92c9b75aee73624a132e4fa89d19d8b" exitCode=0 Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.509048 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57","Type":"ContainerDied","Data":"501976e6a8d58428e202b991df1b1501a92c9b75aee73624a132e4fa89d19d8b"} Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.512704 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.517319 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5dbf0d5-a834-4366-8d63-6d2672446171" (UID: "b5dbf0d5-a834-4366-8d63-6d2672446171"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.536489 4943 scope.go:117] "RemoveContainer" containerID="38af846b30c378c5b32c12852a8287abceee226f84a91fb3c05aa782ac8d348d" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.546773 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data" (OuterVolumeSpecName: "config-data") pod "b5dbf0d5-a834-4366-8d63-6d2672446171" (UID: "b5dbf0d5-a834-4366-8d63-6d2672446171"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.572356 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-etc-machine-id\") pod \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.572406 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gft7\" (UniqueName: \"kubernetes.io/projected/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-kube-api-access-7gft7\") pod \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.572442 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-scripts\") pod \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.572536 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data-custom\") pod \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.572642 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-combined-ca-bundle\") pod \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.572735 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data\") pod \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\" (UID: \"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57\") " Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.573852 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.573869 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5dbf0d5-a834-4366-8d63-6d2672446171-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.577230 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" (UID: "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.577611 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-scripts" (OuterVolumeSpecName: "scripts") pod "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" (UID: "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.591124 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-kube-api-access-7gft7" (OuterVolumeSpecName: "kube-api-access-7gft7") pod "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" (UID: "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57"). InnerVolumeSpecName "kube-api-access-7gft7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.592712 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" (UID: "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.625670 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" (UID: "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.682417 4943 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.683663 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gft7\" (UniqueName: \"kubernetes.io/projected/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-kube-api-access-7gft7\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.684291 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.684307 4943 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.684320 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.687840 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data" (OuterVolumeSpecName: "config-data") pod "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" (UID: "c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.786276 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.816576 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.826147 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-share-share1-0"] Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.839191 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 29 07:52:39 crc kubenswrapper[4943]: E1129 07:52:39.839665 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerName="manila-share" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.839691 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerName="manila-share" Nov 29 07:52:39 crc kubenswrapper[4943]: E1129 07:52:39.839720 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerName="probe" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.839729 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerName="probe" Nov 29 07:52:39 crc kubenswrapper[4943]: E1129 07:52:39.839747 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerName="probe" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.839756 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerName="probe" Nov 29 07:52:39 crc kubenswrapper[4943]: E1129 07:52:39.839771 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerName="manila-scheduler" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.839779 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerName="manila-scheduler" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.840037 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerName="probe" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.840066 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerName="probe" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.840083 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" containerName="manila-scheduler" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.840101 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" containerName="manila-share" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.841409 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.844663 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 29 07:52:39 crc kubenswrapper[4943]: I1129 07:52:39.855438 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.004955 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/52fc0d8a-63e5-4950-8dbd-88d88e77c913-ceph\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.005015 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.005034 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tn6nr\" (UniqueName: \"kubernetes.io/projected/52fc0d8a-63e5-4950-8dbd-88d88e77c913-kube-api-access-tn6nr\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.005193 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/52fc0d8a-63e5-4950-8dbd-88d88e77c913-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.005300 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-config-data\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.005352 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52fc0d8a-63e5-4950-8dbd-88d88e77c913-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.005447 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-scripts\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.005675 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc 
kubenswrapper[4943]: I1129 07:52:40.107606 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.107690 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/52fc0d8a-63e5-4950-8dbd-88d88e77c913-ceph\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.107959 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.107989 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tn6nr\" (UniqueName: \"kubernetes.io/projected/52fc0d8a-63e5-4950-8dbd-88d88e77c913-kube-api-access-tn6nr\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.108060 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/52fc0d8a-63e5-4950-8dbd-88d88e77c913-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.108140 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-config-data\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.108328 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52fc0d8a-63e5-4950-8dbd-88d88e77c913-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.108456 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-scripts\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.108235 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/52fc0d8a-63e5-4950-8dbd-88d88e77c913-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.108386 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/52fc0d8a-63e5-4950-8dbd-88d88e77c913-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.113299 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.113338 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-config-data\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.113653 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/52fc0d8a-63e5-4950-8dbd-88d88e77c913-ceph\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.115086 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.124496 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52fc0d8a-63e5-4950-8dbd-88d88e77c913-scripts\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.133604 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tn6nr\" (UniqueName: \"kubernetes.io/projected/52fc0d8a-63e5-4950-8dbd-88d88e77c913-kube-api-access-tn6nr\") pod \"manila-share-share1-0\" (UID: \"52fc0d8a-63e5-4950-8dbd-88d88e77c913\") " pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.157168 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.513396 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.525938 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57","Type":"ContainerDied","Data":"0c05507f35a55c52c201548b776f611a89ef8feb2c14a18bd6b082eede56184d"} Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.526031 4943 scope.go:117] "RemoveContainer" containerID="93789fd41c1636f91afe1d181bd5590dcf8d91b83219bab82782f916a92c687d" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.526221 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.532388 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"969ecbf0-186b-4121-b56a-998af3fc2e9e","Type":"ContainerStarted","Data":"9db4a81eb0a702e0ee9d28537a61ec2237258cba31f7c24cb541a3a65fb615c9"} Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.532482 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.569702 4943 scope.go:117] "RemoveContainer" containerID="501976e6a8d58428e202b991df1b1501a92c9b75aee73624a132e4fa89d19d8b" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.648904 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.752696972 podStartE2EDuration="8.64888157s" podCreationTimestamp="2025-11-29 07:52:32 +0000 UTC" firstStartedPulling="2025-11-29 07:52:33.604671856 +0000 UTC m=+4728.534760609" lastFinishedPulling="2025-11-29 07:52:39.500856454 +0000 UTC m=+4734.430945207" observedRunningTime="2025-11-29 07:52:40.630042201 +0000 UTC m=+4735.560130964" watchObservedRunningTime="2025-11-29 07:52:40.64888157 +0000 UTC m=+4735.578970323" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.688585 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.692286 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-scheduler-0"] Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.702156 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.703719 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.706365 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.710912 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.770722 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.830809 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.830871 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/128756d3-2ba0-493a-8ee8-c33f6312be8f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.830922 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-config-data\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.830953 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.831045 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-scripts\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.831172 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ftgw\" (UniqueName: \"kubernetes.io/projected/128756d3-2ba0-493a-8ee8-c33f6312be8f-kube-api-access-8ftgw\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.933341 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ftgw\" (UniqueName: \"kubernetes.io/projected/128756d3-2ba0-493a-8ee8-c33f6312be8f-kube-api-access-8ftgw\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.933404 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-config-data-custom\") pod \"manila-scheduler-0\" (UID: 
\"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.933443 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/128756d3-2ba0-493a-8ee8-c33f6312be8f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.933462 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-config-data\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.933488 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.933550 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-scripts\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.933599 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/128756d3-2ba0-493a-8ee8-c33f6312be8f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.938451 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-scripts\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.938538 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.938587 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.941005 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/128756d3-2ba0-493a-8ee8-c33f6312be8f-config-data\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:40 crc kubenswrapper[4943]: I1129 07:52:40.951547 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ftgw\" (UniqueName: 
\"kubernetes.io/projected/128756d3-2ba0-493a-8ee8-c33f6312be8f-kube-api-access-8ftgw\") pod \"manila-scheduler-0\" (UID: \"128756d3-2ba0-493a-8ee8-c33f6312be8f\") " pod="openstack/manila-scheduler-0" Nov 29 07:52:41 crc kubenswrapper[4943]: I1129 07:52:41.026357 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 29 07:52:41 crc kubenswrapper[4943]: I1129 07:52:41.344443 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5dbf0d5-a834-4366-8d63-6d2672446171" path="/var/lib/kubelet/pods/b5dbf0d5-a834-4366-8d63-6d2672446171/volumes" Nov 29 07:52:41 crc kubenswrapper[4943]: I1129 07:52:41.345547 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57" path="/var/lib/kubelet/pods/c9ae7bff-a79c-4ec4-ab77-d3f79b19fd57/volumes" Nov 29 07:52:41 crc kubenswrapper[4943]: I1129 07:52:41.512336 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 29 07:52:41 crc kubenswrapper[4943]: I1129 07:52:41.571373 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"128756d3-2ba0-493a-8ee8-c33f6312be8f","Type":"ContainerStarted","Data":"6bbefa490ade2e41a4e6f4ad0731c8fbcf9f1e497e5cc9e646ca543c95c1afdd"} Nov 29 07:52:41 crc kubenswrapper[4943]: I1129 07:52:41.575032 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"52fc0d8a-63e5-4950-8dbd-88d88e77c913","Type":"ContainerStarted","Data":"4ccde6d02a39857380416ba140a2bb30a4d267588322aa272ec6ce6a2c22e04b"} Nov 29 07:52:41 crc kubenswrapper[4943]: I1129 07:52:41.575075 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"52fc0d8a-63e5-4950-8dbd-88d88e77c913","Type":"ContainerStarted","Data":"37f30fa07ebbef26d23982f66ac9b9478c513ccdf5111332d82573ac2221e9d7"} Nov 29 07:52:42 crc kubenswrapper[4943]: I1129 07:52:42.589051 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"128756d3-2ba0-493a-8ee8-c33f6312be8f","Type":"ContainerStarted","Data":"7bb4caa4dcb0bd7f4100ccb0a55cee68b9b7f6981987586dc0f9609ad51084f9"} Nov 29 07:52:42 crc kubenswrapper[4943]: I1129 07:52:42.589708 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"128756d3-2ba0-493a-8ee8-c33f6312be8f","Type":"ContainerStarted","Data":"5595fb39811a65906a9c6fc0b7619978fec5b346f88138140e88950a08b79856"} Nov 29 07:52:42 crc kubenswrapper[4943]: I1129 07:52:42.592410 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"52fc0d8a-63e5-4950-8dbd-88d88e77c913","Type":"ContainerStarted","Data":"725a46cf1fcb9c489589c8829cb9e6924769c4b7e087d08f1aa0dff66048d73d"} Nov 29 07:52:42 crc kubenswrapper[4943]: I1129 07:52:42.612004 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.611986216 podStartE2EDuration="3.611986216s" podCreationTimestamp="2025-11-29 07:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:52:42.607402565 +0000 UTC m=+4737.537491328" watchObservedRunningTime="2025-11-29 07:52:42.611986216 +0000 UTC m=+4737.542074969" Nov 29 07:52:43 crc kubenswrapper[4943]: I1129 07:52:43.626598 4943 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.62656481 podStartE2EDuration="3.62656481s" podCreationTimestamp="2025-11-29 07:52:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 07:52:43.617189051 +0000 UTC m=+4738.547277824" watchObservedRunningTime="2025-11-29 07:52:43.62656481 +0000 UTC m=+4738.556653563" Nov 29 07:52:46 crc kubenswrapper[4943]: I1129 07:52:46.328270 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:52:46 crc kubenswrapper[4943]: E1129 07:52:46.329467 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:52:46 crc kubenswrapper[4943]: I1129 07:52:46.548331 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5897fd8cd4-cfjb4" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.0:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.0:8443: connect: connection refused" Nov 29 07:52:46 crc kubenswrapper[4943]: I1129 07:52:46.548453 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:52:50 crc kubenswrapper[4943]: I1129 07:52:50.157835 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.027868 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.699373 4943 generic.go:334] "Generic (PLEG): container finished" podID="2401492d-b549-43d0-988e-4d1235af15cf" containerID="87dc35cd40cf69972273e35c7178db0754791d0c9903ace3147529c5d7e1b7a2" exitCode=137 Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.699779 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5897fd8cd4-cfjb4" event={"ID":"2401492d-b549-43d0-988e-4d1235af15cf","Type":"ContainerDied","Data":"87dc35cd40cf69972273e35c7178db0754791d0c9903ace3147529c5d7e1b7a2"} Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.699821 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5897fd8cd4-cfjb4" event={"ID":"2401492d-b549-43d0-988e-4d1235af15cf","Type":"ContainerDied","Data":"b1f6ff46c41feb9f747a9f78bf6ebccba79193d16ed04d03bffc88131d9ed505"} Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.699842 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1f6ff46c41feb9f747a9f78bf6ebccba79193d16ed04d03bffc88131d9ed505" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.764507 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.951084 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2401492d-b549-43d0-988e-4d1235af15cf-logs\") pod \"2401492d-b549-43d0-988e-4d1235af15cf\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.951172 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-combined-ca-bundle\") pod \"2401492d-b549-43d0-988e-4d1235af15cf\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.951255 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rktlj\" (UniqueName: \"kubernetes.io/projected/2401492d-b549-43d0-988e-4d1235af15cf-kube-api-access-rktlj\") pod \"2401492d-b549-43d0-988e-4d1235af15cf\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.951289 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-config-data\") pod \"2401492d-b549-43d0-988e-4d1235af15cf\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.951338 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-tls-certs\") pod \"2401492d-b549-43d0-988e-4d1235af15cf\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.951477 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-scripts\") pod \"2401492d-b549-43d0-988e-4d1235af15cf\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.951597 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-secret-key\") pod \"2401492d-b549-43d0-988e-4d1235af15cf\" (UID: \"2401492d-b549-43d0-988e-4d1235af15cf\") " Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.951795 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2401492d-b549-43d0-988e-4d1235af15cf-logs" (OuterVolumeSpecName: "logs") pod "2401492d-b549-43d0-988e-4d1235af15cf" (UID: "2401492d-b549-43d0-988e-4d1235af15cf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.952241 4943 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2401492d-b549-43d0-988e-4d1235af15cf-logs\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.958656 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "2401492d-b549-43d0-988e-4d1235af15cf" (UID: "2401492d-b549-43d0-988e-4d1235af15cf"). 
InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.958916 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2401492d-b549-43d0-988e-4d1235af15cf-kube-api-access-rktlj" (OuterVolumeSpecName: "kube-api-access-rktlj") pod "2401492d-b549-43d0-988e-4d1235af15cf" (UID: "2401492d-b549-43d0-988e-4d1235af15cf"). InnerVolumeSpecName "kube-api-access-rktlj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.992125 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2401492d-b549-43d0-988e-4d1235af15cf" (UID: "2401492d-b549-43d0-988e-4d1235af15cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:51 crc kubenswrapper[4943]: I1129 07:52:51.997076 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-scripts" (OuterVolumeSpecName: "scripts") pod "2401492d-b549-43d0-988e-4d1235af15cf" (UID: "2401492d-b549-43d0-988e-4d1235af15cf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.017274 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-config-data" (OuterVolumeSpecName: "config-data") pod "2401492d-b549-43d0-988e-4d1235af15cf" (UID: "2401492d-b549-43d0-988e-4d1235af15cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.028083 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "2401492d-b549-43d0-988e-4d1235af15cf" (UID: "2401492d-b549-43d0-988e-4d1235af15cf"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.054334 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rktlj\" (UniqueName: \"kubernetes.io/projected/2401492d-b549-43d0-988e-4d1235af15cf-kube-api-access-rktlj\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.054371 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.054380 4943 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.054390 4943 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2401492d-b549-43d0-988e-4d1235af15cf-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.054399 4943 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.054408 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2401492d-b549-43d0-988e-4d1235af15cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.601262 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.709527 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5897fd8cd4-cfjb4" Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.752214 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5897fd8cd4-cfjb4"] Nov 29 07:52:52 crc kubenswrapper[4943]: I1129 07:52:52.762908 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5897fd8cd4-cfjb4"] Nov 29 07:52:53 crc kubenswrapper[4943]: I1129 07:52:53.339143 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2401492d-b549-43d0-988e-4d1235af15cf" path="/var/lib/kubelet/pods/2401492d-b549-43d0-988e-4d1235af15cf/volumes" Nov 29 07:52:54 crc kubenswrapper[4943]: I1129 07:52:54.195161 4943 scope.go:117] "RemoveContainer" containerID="39b09d3f6c96293e4e3e64dff603ca9a908595112b0e2f46dee736c3bce7acef" Nov 29 07:52:54 crc kubenswrapper[4943]: I1129 07:52:54.228325 4943 scope.go:117] "RemoveContainer" containerID="312d0091031e9e59c769345fffaccba17053af9ae5cb6d5e36c2281b6bb77530" Nov 29 07:52:54 crc kubenswrapper[4943]: I1129 07:52:54.302799 4943 scope.go:117] "RemoveContainer" containerID="8cf3afa644c7360e438d7d07afa57ad1b32fb903895f33f164e9040c0d640aac" Nov 29 07:52:58 crc kubenswrapper[4943]: I1129 07:52:58.327111 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:52:58 crc kubenswrapper[4943]: E1129 07:52:58.327707 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:53:01 crc kubenswrapper[4943]: I1129 07:53:01.624593 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.815282 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t2qqj"] Nov 29 07:53:02 crc kubenswrapper[4943]: E1129 07:53:02.816600 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon-log" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.816678 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon-log" Nov 29 07:53:02 crc kubenswrapper[4943]: E1129 07:53:02.816742 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.816797 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.817043 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.817111 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2401492d-b549-43d0-988e-4d1235af15cf" containerName="horizon-log" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.818461 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.825780 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t2qqj"] Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.960260 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-catalog-content\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.960727 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9bjz\" (UniqueName: \"kubernetes.io/projected/76aede00-1958-44c2-8256-3e1c86aed994-kube-api-access-t9bjz\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:02 crc kubenswrapper[4943]: I1129 07:53:02.960782 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-utilities\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.062756 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-utilities\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.062921 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-catalog-content\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.062995 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9bjz\" (UniqueName: \"kubernetes.io/projected/76aede00-1958-44c2-8256-3e1c86aed994-kube-api-access-t9bjz\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.063283 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-utilities\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.063552 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-catalog-content\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.080473 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/ceilometer-0" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.083860 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9bjz\" (UniqueName: \"kubernetes.io/projected/76aede00-1958-44c2-8256-3e1c86aed994-kube-api-access-t9bjz\") pod \"redhat-operators-t2qqj\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.142162 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.607518 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t2qqj"] Nov 29 07:53:03 crc kubenswrapper[4943]: W1129 07:53:03.613293 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76aede00_1958_44c2_8256_3e1c86aed994.slice/crio-cde97e50b48dfa97b3c3c52dd07323ed1bbb0042c35d745bcfe61e4776e5b7df WatchSource:0}: Error finding container cde97e50b48dfa97b3c3c52dd07323ed1bbb0042c35d745bcfe61e4776e5b7df: Status 404 returned error can't find the container with id cde97e50b48dfa97b3c3c52dd07323ed1bbb0042c35d745bcfe61e4776e5b7df Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.805141 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2qqj" event={"ID":"76aede00-1958-44c2-8256-3e1c86aed994","Type":"ContainerStarted","Data":"df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2"} Nov 29 07:53:03 crc kubenswrapper[4943]: I1129 07:53:03.805308 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2qqj" event={"ID":"76aede00-1958-44c2-8256-3e1c86aed994","Type":"ContainerStarted","Data":"cde97e50b48dfa97b3c3c52dd07323ed1bbb0042c35d745bcfe61e4776e5b7df"} Nov 29 07:53:04 crc kubenswrapper[4943]: I1129 07:53:04.814257 4943 generic.go:334] "Generic (PLEG): container finished" podID="76aede00-1958-44c2-8256-3e1c86aed994" containerID="df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2" exitCode=0 Nov 29 07:53:04 crc kubenswrapper[4943]: I1129 07:53:04.814303 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2qqj" event={"ID":"76aede00-1958-44c2-8256-3e1c86aed994","Type":"ContainerDied","Data":"df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2"} Nov 29 07:53:05 crc kubenswrapper[4943]: I1129 07:53:05.827207 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2qqj" event={"ID":"76aede00-1958-44c2-8256-3e1c86aed994","Type":"ContainerStarted","Data":"71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073"} Nov 29 07:53:08 crc kubenswrapper[4943]: I1129 07:53:08.864918 4943 generic.go:334] "Generic (PLEG): container finished" podID="76aede00-1958-44c2-8256-3e1c86aed994" containerID="71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073" exitCode=0 Nov 29 07:53:08 crc kubenswrapper[4943]: I1129 07:53:08.865032 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2qqj" event={"ID":"76aede00-1958-44c2-8256-3e1c86aed994","Type":"ContainerDied","Data":"71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073"} Nov 29 07:53:09 crc kubenswrapper[4943]: I1129 07:53:09.878588 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-t2qqj" event={"ID":"76aede00-1958-44c2-8256-3e1c86aed994","Type":"ContainerStarted","Data":"b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98"} Nov 29 07:53:09 crc kubenswrapper[4943]: I1129 07:53:09.962267 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t2qqj" podStartSLOduration=3.503266431 podStartE2EDuration="7.962249767s" podCreationTimestamp="2025-11-29 07:53:02 +0000 UTC" firstStartedPulling="2025-11-29 07:53:04.815992993 +0000 UTC m=+4759.746081746" lastFinishedPulling="2025-11-29 07:53:09.274976329 +0000 UTC m=+4764.205065082" observedRunningTime="2025-11-29 07:53:09.946534854 +0000 UTC m=+4764.876623617" watchObservedRunningTime="2025-11-29 07:53:09.962249767 +0000 UTC m=+4764.892338520" Nov 29 07:53:12 crc kubenswrapper[4943]: I1129 07:53:12.327484 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:53:12 crc kubenswrapper[4943]: E1129 07:53:12.328083 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:53:13 crc kubenswrapper[4943]: I1129 07:53:13.143299 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:13 crc kubenswrapper[4943]: I1129 07:53:13.143537 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:14 crc kubenswrapper[4943]: I1129 07:53:14.195925 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t2qqj" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="registry-server" probeResult="failure" output=< Nov 29 07:53:14 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 07:53:14 crc kubenswrapper[4943]: > Nov 29 07:53:23 crc kubenswrapper[4943]: I1129 07:53:23.186804 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:23 crc kubenswrapper[4943]: I1129 07:53:23.234584 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:23 crc kubenswrapper[4943]: I1129 07:53:23.422205 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t2qqj"] Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.052736 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t2qqj" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="registry-server" containerID="cri-o://b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98" gracePeriod=2 Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.503404 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.554693 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-catalog-content\") pod \"76aede00-1958-44c2-8256-3e1c86aed994\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.554869 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9bjz\" (UniqueName: \"kubernetes.io/projected/76aede00-1958-44c2-8256-3e1c86aed994-kube-api-access-t9bjz\") pod \"76aede00-1958-44c2-8256-3e1c86aed994\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.554924 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-utilities\") pod \"76aede00-1958-44c2-8256-3e1c86aed994\" (UID: \"76aede00-1958-44c2-8256-3e1c86aed994\") " Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.557418 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-utilities" (OuterVolumeSpecName: "utilities") pod "76aede00-1958-44c2-8256-3e1c86aed994" (UID: "76aede00-1958-44c2-8256-3e1c86aed994"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.561291 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76aede00-1958-44c2-8256-3e1c86aed994-kube-api-access-t9bjz" (OuterVolumeSpecName: "kube-api-access-t9bjz") pod "76aede00-1958-44c2-8256-3e1c86aed994" (UID: "76aede00-1958-44c2-8256-3e1c86aed994"). InnerVolumeSpecName "kube-api-access-t9bjz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.646603 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76aede00-1958-44c2-8256-3e1c86aed994" (UID: "76aede00-1958-44c2-8256-3e1c86aed994"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.656574 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.656705 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9bjz\" (UniqueName: \"kubernetes.io/projected/76aede00-1958-44c2-8256-3e1c86aed994-kube-api-access-t9bjz\") on node \"crc\" DevicePath \"\"" Nov 29 07:53:25 crc kubenswrapper[4943]: I1129 07:53:25.656764 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76aede00-1958-44c2-8256-3e1c86aed994-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.062954 4943 generic.go:334] "Generic (PLEG): container finished" podID="76aede00-1958-44c2-8256-3e1c86aed994" containerID="b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98" exitCode=0 Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.063068 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t2qqj" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.063095 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2qqj" event={"ID":"76aede00-1958-44c2-8256-3e1c86aed994","Type":"ContainerDied","Data":"b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98"} Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.064430 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2qqj" event={"ID":"76aede00-1958-44c2-8256-3e1c86aed994","Type":"ContainerDied","Data":"cde97e50b48dfa97b3c3c52dd07323ed1bbb0042c35d745bcfe61e4776e5b7df"} Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.064458 4943 scope.go:117] "RemoveContainer" containerID="b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.102759 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t2qqj"] Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.107354 4943 scope.go:117] "RemoveContainer" containerID="71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.112661 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t2qqj"] Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.146054 4943 scope.go:117] "RemoveContainer" containerID="df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.184951 4943 scope.go:117] "RemoveContainer" containerID="b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98" Nov 29 07:53:26 crc kubenswrapper[4943]: E1129 07:53:26.185659 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98\": container with ID starting with b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98 not found: ID does not exist" containerID="b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.185698 4943 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98"} err="failed to get container status \"b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98\": rpc error: code = NotFound desc = could not find container \"b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98\": container with ID starting with b86dd182c356ce6b18187f8a909543ca91a6b4931221a6ff657039f2afa11c98 not found: ID does not exist" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.185724 4943 scope.go:117] "RemoveContainer" containerID="71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073" Nov 29 07:53:26 crc kubenswrapper[4943]: E1129 07:53:26.186045 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073\": container with ID starting with 71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073 not found: ID does not exist" containerID="71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.186069 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073"} err="failed to get container status \"71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073\": rpc error: code = NotFound desc = could not find container \"71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073\": container with ID starting with 71620a11194af2b4cf2e4f0ad6d0f2e7b0a87d71b7c2bc2d760459702c017073 not found: ID does not exist" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.186088 4943 scope.go:117] "RemoveContainer" containerID="df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2" Nov 29 07:53:26 crc kubenswrapper[4943]: E1129 07:53:26.186472 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2\": container with ID starting with df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2 not found: ID does not exist" containerID="df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2" Nov 29 07:53:26 crc kubenswrapper[4943]: I1129 07:53:26.186496 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2"} err="failed to get container status \"df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2\": rpc error: code = NotFound desc = could not find container \"df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2\": container with ID starting with df85f3ac260e7dead3b2bc699997b531cd7e0b9068362e0c1166aadc96cbdaa2 not found: ID does not exist" Nov 29 07:53:27 crc kubenswrapper[4943]: I1129 07:53:27.327974 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:53:27 crc kubenswrapper[4943]: E1129 07:53:27.328769 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:53:27 crc kubenswrapper[4943]: I1129 07:53:27.338685 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76aede00-1958-44c2-8256-3e1c86aed994" path="/var/lib/kubelet/pods/76aede00-1958-44c2-8256-3e1c86aed994/volumes" Nov 29 07:53:41 crc kubenswrapper[4943]: I1129 07:53:41.328143 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:53:41 crc kubenswrapper[4943]: E1129 07:53:41.329050 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:53:53 crc kubenswrapper[4943]: I1129 07:53:53.328504 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:53:53 crc kubenswrapper[4943]: E1129 07:53:53.329689 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.864741 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 29 07:54:06 crc kubenswrapper[4943]: E1129 07:54:06.865756 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="registry-server" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.865781 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="registry-server" Nov 29 07:54:06 crc kubenswrapper[4943]: E1129 07:54:06.865803 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="extract-utilities" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.865814 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="extract-utilities" Nov 29 07:54:06 crc kubenswrapper[4943]: E1129 07:54:06.865849 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="extract-content" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.865860 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="extract-content" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.866133 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="76aede00-1958-44c2-8256-3e1c86aed994" containerName="registry-server" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.867104 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.869128 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-9gct7" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.869550 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.870537 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.871313 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.888708 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.948402 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.948478 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.948667 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.948839 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.948915 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.949011 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m94cs\" (UniqueName: \"kubernetes.io/projected/19a81291-2c8c-4099-a701-7f4049f4e890-kube-api-access-m94cs\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.949074 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: 
\"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.949304 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-config-data\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:06 crc kubenswrapper[4943]: I1129 07:54:06.949684 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.051732 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.051829 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.051876 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.051925 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.052047 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.052098 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.052161 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m94cs\" (UniqueName: 
\"kubernetes.io/projected/19a81291-2c8c-4099-a701-7f4049f4e890-kube-api-access-m94cs\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.052209 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.052305 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-config-data\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.052677 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.054019 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.054506 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.054526 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-config-data\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.055028 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.061046 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.062015 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ca-certs\") pod \"tempest-tests-tempest\" (UID: 
\"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.062502 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.077232 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m94cs\" (UniqueName: \"kubernetes.io/projected/19a81291-2c8c-4099-a701-7f4049f4e890-kube-api-access-m94cs\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.127118 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.217530 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.327143 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:54:07 crc kubenswrapper[4943]: E1129 07:54:07.327503 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.697183 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.703268 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 07:54:07 crc kubenswrapper[4943]: I1129 07:54:07.736659 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"19a81291-2c8c-4099-a701-7f4049f4e890","Type":"ContainerStarted","Data":"0aa5280ba75ee13e9eb1d7bc93800a2a2da2ff251c9d22333844caacf3d45acb"} Nov 29 07:54:19 crc kubenswrapper[4943]: I1129 07:54:19.328912 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:54:19 crc kubenswrapper[4943]: E1129 07:54:19.330265 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 07:54:34 crc kubenswrapper[4943]: I1129 07:54:34.327777 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 07:54:44 crc kubenswrapper[4943]: E1129 07:54:44.520795 4943 
log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 29 07:54:44 crc kubenswrapper[4943]: E1129 07:54:44.521986 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m94cs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(19a81291-2c8c-4099-a701-7f4049f4e890): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 07:54:44 crc 
kubenswrapper[4943]: E1129 07:54:44.524893 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="19a81291-2c8c-4099-a701-7f4049f4e890" Nov 29 07:54:45 crc kubenswrapper[4943]: I1129 07:54:45.140301 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"dcfea8ec18701b707887e98b98b2d5d4621e274425dd25793a37d38ab2d2b230"} Nov 29 07:54:45 crc kubenswrapper[4943]: E1129 07:54:45.142808 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="19a81291-2c8c-4099-a701-7f4049f4e890" Nov 29 07:54:59 crc kubenswrapper[4943]: I1129 07:54:59.788004 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 29 07:55:01 crc kubenswrapper[4943]: I1129 07:55:01.304453 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"19a81291-2c8c-4099-a701-7f4049f4e890","Type":"ContainerStarted","Data":"85ab34ab73b8e4a588fa3558fe8ef8bae3913ebea2c506b72e377a7d8f9ce45a"} Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.746167 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=102.665403115 podStartE2EDuration="2m34.74613548s" podCreationTimestamp="2025-11-29 07:54:05 +0000 UTC" firstStartedPulling="2025-11-29 07:54:07.702885233 +0000 UTC m=+4822.632974026" lastFinishedPulling="2025-11-29 07:54:59.783617648 +0000 UTC m=+4874.713706391" observedRunningTime="2025-11-29 07:55:01.336884259 +0000 UTC m=+4876.266973042" watchObservedRunningTime="2025-11-29 07:56:39.74613548 +0000 UTC m=+4974.676224273" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.752052 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qcbt6"] Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.757077 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.767335 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcbt6"] Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.847067 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-catalog-content\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.847265 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-utilities\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.847300 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54pvx\" (UniqueName: \"kubernetes.io/projected/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-kube-api-access-54pvx\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.949428 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-utilities\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.949483 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54pvx\" (UniqueName: \"kubernetes.io/projected/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-kube-api-access-54pvx\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.949595 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-catalog-content\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.950181 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-catalog-content\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:39 crc kubenswrapper[4943]: I1129 07:56:39.950282 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-utilities\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:40 crc kubenswrapper[4943]: I1129 07:56:40.033069 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-54pvx\" (UniqueName: \"kubernetes.io/projected/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-kube-api-access-54pvx\") pod \"redhat-marketplace-qcbt6\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:40 crc kubenswrapper[4943]: I1129 07:56:40.090521 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:40 crc kubenswrapper[4943]: I1129 07:56:40.680735 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcbt6"] Nov 29 07:56:41 crc kubenswrapper[4943]: I1129 07:56:41.473813 4943 generic.go:334] "Generic (PLEG): container finished" podID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerID="2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a" exitCode=0 Nov 29 07:56:41 crc kubenswrapper[4943]: I1129 07:56:41.473868 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcbt6" event={"ID":"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5","Type":"ContainerDied","Data":"2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a"} Nov 29 07:56:41 crc kubenswrapper[4943]: I1129 07:56:41.474172 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcbt6" event={"ID":"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5","Type":"ContainerStarted","Data":"5ddcab63727f4eedf70fb47ea7e00bf7e80c008c9754a3e9e5e8af11008b833a"} Nov 29 07:56:44 crc kubenswrapper[4943]: I1129 07:56:44.499584 4943 generic.go:334] "Generic (PLEG): container finished" podID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerID="30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc" exitCode=0 Nov 29 07:56:44 crc kubenswrapper[4943]: I1129 07:56:44.499662 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcbt6" event={"ID":"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5","Type":"ContainerDied","Data":"30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc"} Nov 29 07:56:45 crc kubenswrapper[4943]: I1129 07:56:45.511523 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcbt6" event={"ID":"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5","Type":"ContainerStarted","Data":"5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9"} Nov 29 07:56:46 crc kubenswrapper[4943]: I1129 07:56:46.540109 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qcbt6" podStartSLOduration=3.98904978 podStartE2EDuration="7.540085428s" podCreationTimestamp="2025-11-29 07:56:39 +0000 UTC" firstStartedPulling="2025-11-29 07:56:41.475740551 +0000 UTC m=+4976.405829304" lastFinishedPulling="2025-11-29 07:56:45.026776199 +0000 UTC m=+4979.956864952" observedRunningTime="2025-11-29 07:56:46.533702396 +0000 UTC m=+4981.463791159" watchObservedRunningTime="2025-11-29 07:56:46.540085428 +0000 UTC m=+4981.470174181" Nov 29 07:56:50 crc kubenswrapper[4943]: I1129 07:56:50.091688 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:50 crc kubenswrapper[4943]: I1129 07:56:50.092315 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:50 crc kubenswrapper[4943]: I1129 07:56:50.489385 4943 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:50 crc kubenswrapper[4943]: I1129 07:56:50.612837 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:50 crc kubenswrapper[4943]: I1129 07:56:50.745447 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcbt6"] Nov 29 07:56:52 crc kubenswrapper[4943]: I1129 07:56:52.573063 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qcbt6" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerName="registry-server" containerID="cri-o://5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9" gracePeriod=2 Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.099245 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.118751 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54pvx\" (UniqueName: \"kubernetes.io/projected/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-kube-api-access-54pvx\") pod \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.119300 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-utilities\") pod \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.119659 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-catalog-content\") pod \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\" (UID: \"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5\") " Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.120065 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-utilities" (OuterVolumeSpecName: "utilities") pod "f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" (UID: "f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.121047 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.124776 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-kube-api-access-54pvx" (OuterVolumeSpecName: "kube-api-access-54pvx") pod "f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" (UID: "f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5"). InnerVolumeSpecName "kube-api-access-54pvx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.145421 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" (UID: "f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.223415 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54pvx\" (UniqueName: \"kubernetes.io/projected/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-kube-api-access-54pvx\") on node \"crc\" DevicePath \"\"" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.223480 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.585345 4943 generic.go:334] "Generic (PLEG): container finished" podID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerID="5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9" exitCode=0 Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.585416 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcbt6" event={"ID":"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5","Type":"ContainerDied","Data":"5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9"} Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.585452 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcbt6" event={"ID":"f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5","Type":"ContainerDied","Data":"5ddcab63727f4eedf70fb47ea7e00bf7e80c008c9754a3e9e5e8af11008b833a"} Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.585473 4943 scope.go:117] "RemoveContainer" containerID="5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.585640 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qcbt6" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.613131 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcbt6"] Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.618946 4943 scope.go:117] "RemoveContainer" containerID="30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.623403 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcbt6"] Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.665382 4943 scope.go:117] "RemoveContainer" containerID="2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.701807 4943 scope.go:117] "RemoveContainer" containerID="5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9" Nov 29 07:56:53 crc kubenswrapper[4943]: E1129 07:56:53.702184 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9\": container with ID starting with 5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9 not found: ID does not exist" containerID="5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.702219 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9"} err="failed to get container status \"5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9\": rpc error: code = NotFound desc = could not find container \"5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9\": container with ID starting with 5d41c84488d8045b3dc96a658ccd331c35c91ecd6ef124a113542994180750c9 not found: ID does not exist" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.702251 4943 scope.go:117] "RemoveContainer" containerID="30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc" Nov 29 07:56:53 crc kubenswrapper[4943]: E1129 07:56:53.702651 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc\": container with ID starting with 30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc not found: ID does not exist" containerID="30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.702680 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc"} err="failed to get container status \"30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc\": rpc error: code = NotFound desc = could not find container \"30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc\": container with ID starting with 30cf6891a35d0316b577f7ba384d75b6171393d5305cde736852fa552b239afc not found: ID does not exist" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.702695 4943 scope.go:117] "RemoveContainer" containerID="2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a" Nov 29 07:56:53 crc kubenswrapper[4943]: E1129 07:56:53.703320 4943 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a\": container with ID starting with 2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a not found: ID does not exist" containerID="2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a" Nov 29 07:56:53 crc kubenswrapper[4943]: I1129 07:56:53.703344 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a"} err="failed to get container status \"2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a\": rpc error: code = NotFound desc = could not find container \"2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a\": container with ID starting with 2228619c79cc802195706f616bba7a9bb8f240f5826b63bfcf09e0333fac385a not found: ID does not exist" Nov 29 07:56:55 crc kubenswrapper[4943]: I1129 07:56:55.339865 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" path="/var/lib/kubelet/pods/f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5/volumes" Nov 29 07:57:02 crc kubenswrapper[4943]: I1129 07:57:02.613190 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:57:02 crc kubenswrapper[4943]: I1129 07:57:02.613697 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:57:16 crc kubenswrapper[4943]: I1129 07:57:16.955392 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rfskk"] Nov 29 07:57:16 crc kubenswrapper[4943]: E1129 07:57:16.957651 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerName="extract-content" Nov 29 07:57:16 crc kubenswrapper[4943]: I1129 07:57:16.957676 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerName="extract-content" Nov 29 07:57:16 crc kubenswrapper[4943]: E1129 07:57:16.957689 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerName="registry-server" Nov 29 07:57:16 crc kubenswrapper[4943]: I1129 07:57:16.957699 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerName="registry-server" Nov 29 07:57:16 crc kubenswrapper[4943]: E1129 07:57:16.957752 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerName="extract-utilities" Nov 29 07:57:16 crc kubenswrapper[4943]: I1129 07:57:16.957764 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerName="extract-utilities" Nov 29 07:57:16 crc kubenswrapper[4943]: I1129 07:57:16.958020 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8c5336f-c6a5-4ca0-94fa-d36dc3d743f5" containerName="registry-server" Nov 29 07:57:16 crc kubenswrapper[4943]: I1129 
07:57:16.990719 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfskk"] Nov 29 07:57:16 crc kubenswrapper[4943]: I1129 07:57:16.990895 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.103231 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-catalog-content\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.103304 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cj77\" (UniqueName: \"kubernetes.io/projected/e2037b77-79d0-4633-94fc-256885b83add-kube-api-access-8cj77\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.103356 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-utilities\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.205261 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-utilities\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.205458 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-catalog-content\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.205554 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cj77\" (UniqueName: \"kubernetes.io/projected/e2037b77-79d0-4633-94fc-256885b83add-kube-api-access-8cj77\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.206660 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-utilities\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.206718 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-catalog-content\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc 
kubenswrapper[4943]: I1129 07:57:17.238111 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cj77\" (UniqueName: \"kubernetes.io/projected/e2037b77-79d0-4633-94fc-256885b83add-kube-api-access-8cj77\") pod \"community-operators-rfskk\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.321209 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:17 crc kubenswrapper[4943]: I1129 07:57:17.867460 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfskk"] Nov 29 07:57:18 crc kubenswrapper[4943]: I1129 07:57:18.843257 4943 generic.go:334] "Generic (PLEG): container finished" podID="e2037b77-79d0-4633-94fc-256885b83add" containerID="08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c" exitCode=0 Nov 29 07:57:18 crc kubenswrapper[4943]: I1129 07:57:18.843425 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfskk" event={"ID":"e2037b77-79d0-4633-94fc-256885b83add","Type":"ContainerDied","Data":"08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c"} Nov 29 07:57:18 crc kubenswrapper[4943]: I1129 07:57:18.843733 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfskk" event={"ID":"e2037b77-79d0-4633-94fc-256885b83add","Type":"ContainerStarted","Data":"db48ea65f707f5ef6f4e231b42143221f27f2526b81835f4e23f939dd1320cf9"} Nov 29 07:57:21 crc kubenswrapper[4943]: I1129 07:57:21.881963 4943 generic.go:334] "Generic (PLEG): container finished" podID="e2037b77-79d0-4633-94fc-256885b83add" containerID="46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed" exitCode=0 Nov 29 07:57:21 crc kubenswrapper[4943]: I1129 07:57:21.882464 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfskk" event={"ID":"e2037b77-79d0-4633-94fc-256885b83add","Type":"ContainerDied","Data":"46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed"} Nov 29 07:57:22 crc kubenswrapper[4943]: I1129 07:57:22.895080 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfskk" event={"ID":"e2037b77-79d0-4633-94fc-256885b83add","Type":"ContainerStarted","Data":"25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8"} Nov 29 07:57:22 crc kubenswrapper[4943]: I1129 07:57:22.921511 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rfskk" podStartSLOduration=3.318786902 podStartE2EDuration="6.921491056s" podCreationTimestamp="2025-11-29 07:57:16 +0000 UTC" firstStartedPulling="2025-11-29 07:57:18.845622847 +0000 UTC m=+5013.775711610" lastFinishedPulling="2025-11-29 07:57:22.448327011 +0000 UTC m=+5017.378415764" observedRunningTime="2025-11-29 07:57:22.918743361 +0000 UTC m=+5017.848832174" watchObservedRunningTime="2025-11-29 07:57:22.921491056 +0000 UTC m=+5017.851579809" Nov 29 07:57:27 crc kubenswrapper[4943]: I1129 07:57:27.322110 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:27 crc kubenswrapper[4943]: I1129 07:57:27.322699 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:27 crc kubenswrapper[4943]: I1129 07:57:27.372185 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:28 crc kubenswrapper[4943]: I1129 07:57:28.002067 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:28 crc kubenswrapper[4943]: I1129 07:57:28.054194 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rfskk"] Nov 29 07:57:29 crc kubenswrapper[4943]: I1129 07:57:29.974119 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rfskk" podUID="e2037b77-79d0-4633-94fc-256885b83add" containerName="registry-server" containerID="cri-o://25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8" gracePeriod=2 Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.472476 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.512060 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cj77\" (UniqueName: \"kubernetes.io/projected/e2037b77-79d0-4633-94fc-256885b83add-kube-api-access-8cj77\") pod \"e2037b77-79d0-4633-94fc-256885b83add\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.512206 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-catalog-content\") pod \"e2037b77-79d0-4633-94fc-256885b83add\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.512381 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-utilities\") pod \"e2037b77-79d0-4633-94fc-256885b83add\" (UID: \"e2037b77-79d0-4633-94fc-256885b83add\") " Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.513041 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-utilities" (OuterVolumeSpecName: "utilities") pod "e2037b77-79d0-4633-94fc-256885b83add" (UID: "e2037b77-79d0-4633-94fc-256885b83add"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.519639 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2037b77-79d0-4633-94fc-256885b83add-kube-api-access-8cj77" (OuterVolumeSpecName: "kube-api-access-8cj77") pod "e2037b77-79d0-4633-94fc-256885b83add" (UID: "e2037b77-79d0-4633-94fc-256885b83add"). InnerVolumeSpecName "kube-api-access-8cj77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.567896 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e2037b77-79d0-4633-94fc-256885b83add" (UID: "e2037b77-79d0-4633-94fc-256885b83add"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.615240 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.615290 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cj77\" (UniqueName: \"kubernetes.io/projected/e2037b77-79d0-4633-94fc-256885b83add-kube-api-access-8cj77\") on node \"crc\" DevicePath \"\"" Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.615311 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2037b77-79d0-4633-94fc-256885b83add-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.986783 4943 generic.go:334] "Generic (PLEG): container finished" podID="e2037b77-79d0-4633-94fc-256885b83add" containerID="25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8" exitCode=0 Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.986889 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfskk" Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.986902 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfskk" event={"ID":"e2037b77-79d0-4633-94fc-256885b83add","Type":"ContainerDied","Data":"25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8"} Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.988641 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfskk" event={"ID":"e2037b77-79d0-4633-94fc-256885b83add","Type":"ContainerDied","Data":"db48ea65f707f5ef6f4e231b42143221f27f2526b81835f4e23f939dd1320cf9"} Nov 29 07:57:30 crc kubenswrapper[4943]: I1129 07:57:30.988665 4943 scope.go:117] "RemoveContainer" containerID="25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8" Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.378978 4943 scope.go:117] "RemoveContainer" containerID="46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed" Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.407925 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rfskk"] Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.418467 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rfskk"] Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.419879 4943 scope.go:117] "RemoveContainer" containerID="08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c" Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.456748 4943 scope.go:117] "RemoveContainer" containerID="25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8" Nov 29 07:57:31 crc kubenswrapper[4943]: E1129 07:57:31.457540 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8\": container with ID starting with 25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8 not found: ID does not exist" containerID="25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8" Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.457670 
4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8"} err="failed to get container status \"25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8\": rpc error: code = NotFound desc = could not find container \"25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8\": container with ID starting with 25f5a4c6c7c1b826fe1da79e1163ff1d8d9468ea775f4f295842250d3d6b4db8 not found: ID does not exist" Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.457719 4943 scope.go:117] "RemoveContainer" containerID="46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed" Nov 29 07:57:31 crc kubenswrapper[4943]: E1129 07:57:31.458451 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed\": container with ID starting with 46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed not found: ID does not exist" containerID="46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed" Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.458516 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed"} err="failed to get container status \"46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed\": rpc error: code = NotFound desc = could not find container \"46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed\": container with ID starting with 46413bf3cd3aa102b1670c6221c1ed41963f945c5dc4fcea12ac69e62a4607ed not found: ID does not exist" Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.458556 4943 scope.go:117] "RemoveContainer" containerID="08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c" Nov 29 07:57:31 crc kubenswrapper[4943]: E1129 07:57:31.459134 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c\": container with ID starting with 08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c not found: ID does not exist" containerID="08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c" Nov 29 07:57:31 crc kubenswrapper[4943]: I1129 07:57:31.459181 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c"} err="failed to get container status \"08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c\": rpc error: code = NotFound desc = could not find container \"08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c\": container with ID starting with 08ca0ab3b6ce028d4749fe219aa2b8fa2f76c51ca96cbae3122718ae388f652c not found: ID does not exist" Nov 29 07:57:32 crc kubenswrapper[4943]: I1129 07:57:32.613694 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:57:32 crc kubenswrapper[4943]: I1129 07:57:32.614083 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" 
podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:57:33 crc kubenswrapper[4943]: I1129 07:57:33.338180 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2037b77-79d0-4633-94fc-256885b83add" path="/var/lib/kubelet/pods/e2037b77-79d0-4633-94fc-256885b83add/volumes" Nov 29 07:57:54 crc kubenswrapper[4943]: I1129 07:57:54.617895 4943 scope.go:117] "RemoveContainer" containerID="87dc35cd40cf69972273e35c7178db0754791d0c9903ace3147529c5d7e1b7a2" Nov 29 07:57:54 crc kubenswrapper[4943]: I1129 07:57:54.646124 4943 scope.go:117] "RemoveContainer" containerID="f57733cbd771d9b14f6636e01a0b1b3660f074cc9c1d32d021e83c7a572f150a" Nov 29 07:58:02 crc kubenswrapper[4943]: I1129 07:58:02.932228 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 07:58:02 crc kubenswrapper[4943]: I1129 07:58:02.932943 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 07:58:02 crc kubenswrapper[4943]: I1129 07:58:02.932995 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 07:58:02 crc kubenswrapper[4943]: I1129 07:58:02.933746 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dcfea8ec18701b707887e98b98b2d5d4621e274425dd25793a37d38ab2d2b230"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 07:58:02 crc kubenswrapper[4943]: I1129 07:58:02.933825 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://dcfea8ec18701b707887e98b98b2d5d4621e274425dd25793a37d38ab2d2b230" gracePeriod=600 Nov 29 07:58:03 crc kubenswrapper[4943]: I1129 07:58:03.956142 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="dcfea8ec18701b707887e98b98b2d5d4621e274425dd25793a37d38ab2d2b230" exitCode=0 Nov 29 07:58:03 crc kubenswrapper[4943]: I1129 07:58:03.956498 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"dcfea8ec18701b707887e98b98b2d5d4621e274425dd25793a37d38ab2d2b230"} Nov 29 07:58:03 crc kubenswrapper[4943]: I1129 07:58:03.956530 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279"} Nov 29 07:58:03 crc 
kubenswrapper[4943]: I1129 07:58:03.956548 4943 scope.go:117] "RemoveContainer" containerID="1a7caae7b91e0a621957a7be9384d445b66a21655cf9d360e402809aa28709d4" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.147184 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg"] Nov 29 08:00:00 crc kubenswrapper[4943]: E1129 08:00:00.148105 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2037b77-79d0-4633-94fc-256885b83add" containerName="extract-utilities" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.148117 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2037b77-79d0-4633-94fc-256885b83add" containerName="extract-utilities" Nov 29 08:00:00 crc kubenswrapper[4943]: E1129 08:00:00.148139 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2037b77-79d0-4633-94fc-256885b83add" containerName="extract-content" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.148145 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2037b77-79d0-4633-94fc-256885b83add" containerName="extract-content" Nov 29 08:00:00 crc kubenswrapper[4943]: E1129 08:00:00.148159 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2037b77-79d0-4633-94fc-256885b83add" containerName="registry-server" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.148165 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2037b77-79d0-4633-94fc-256885b83add" containerName="registry-server" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.148320 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2037b77-79d0-4633-94fc-256885b83add" containerName="registry-server" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.148966 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.150663 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.150707 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.162003 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg"] Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.257679 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-secret-volume\") pod \"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.258000 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-config-volume\") pod \"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.258162 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsc8f\" (UniqueName: \"kubernetes.io/projected/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-kube-api-access-nsc8f\") pod \"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.360604 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-secret-volume\") pod \"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.360989 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-config-volume\") pod \"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.361030 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsc8f\" (UniqueName: \"kubernetes.io/projected/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-kube-api-access-nsc8f\") pod \"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.362326 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-config-volume\") pod 
\"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.367741 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-secret-volume\") pod \"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.395152 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsc8f\" (UniqueName: \"kubernetes.io/projected/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-kube-api-access-nsc8f\") pod \"collect-profiles-29406720-vv4fg\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.468144 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:00 crc kubenswrapper[4943]: I1129 08:00:00.977128 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg"] Nov 29 08:00:01 crc kubenswrapper[4943]: I1129 08:00:01.035132 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" event={"ID":"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815","Type":"ContainerStarted","Data":"fcc224ba8a56832e1d2b28c5e05e43366ee04c836a024e2694daff2719619d49"} Nov 29 08:00:02 crc kubenswrapper[4943]: I1129 08:00:02.046596 4943 generic.go:334] "Generic (PLEG): container finished" podID="0bd43e07-d7db-4cb0-8e9a-6bc323cf5815" containerID="db8f8c4667698ef3b4fdd10d8b5d7917bb3fe097c4b22d0296e5d1dcdff4bc68" exitCode=0 Nov 29 08:00:02 crc kubenswrapper[4943]: I1129 08:00:02.046873 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" event={"ID":"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815","Type":"ContainerDied","Data":"db8f8c4667698ef3b4fdd10d8b5d7917bb3fe097c4b22d0296e5d1dcdff4bc68"} Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.558821 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.627583 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsc8f\" (UniqueName: \"kubernetes.io/projected/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-kube-api-access-nsc8f\") pod \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.627795 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-secret-volume\") pod \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.627953 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-config-volume\") pod \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\" (UID: \"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815\") " Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.628745 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-config-volume" (OuterVolumeSpecName: "config-volume") pod "0bd43e07-d7db-4cb0-8e9a-6bc323cf5815" (UID: "0bd43e07-d7db-4cb0-8e9a-6bc323cf5815"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.634963 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0bd43e07-d7db-4cb0-8e9a-6bc323cf5815" (UID: "0bd43e07-d7db-4cb0-8e9a-6bc323cf5815"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.635893 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-kube-api-access-nsc8f" (OuterVolumeSpecName: "kube-api-access-nsc8f") pod "0bd43e07-d7db-4cb0-8e9a-6bc323cf5815" (UID: "0bd43e07-d7db-4cb0-8e9a-6bc323cf5815"). InnerVolumeSpecName "kube-api-access-nsc8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.730811 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.730851 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 08:00:03 crc kubenswrapper[4943]: I1129 08:00:03.730861 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsc8f\" (UniqueName: \"kubernetes.io/projected/0bd43e07-d7db-4cb0-8e9a-6bc323cf5815-kube-api-access-nsc8f\") on node \"crc\" DevicePath \"\"" Nov 29 08:00:04 crc kubenswrapper[4943]: I1129 08:00:04.070912 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" Nov 29 08:00:04 crc kubenswrapper[4943]: I1129 08:00:04.070899 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406720-vv4fg" event={"ID":"0bd43e07-d7db-4cb0-8e9a-6bc323cf5815","Type":"ContainerDied","Data":"fcc224ba8a56832e1d2b28c5e05e43366ee04c836a024e2694daff2719619d49"} Nov 29 08:00:04 crc kubenswrapper[4943]: I1129 08:00:04.071311 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fcc224ba8a56832e1d2b28c5e05e43366ee04c836a024e2694daff2719619d49" Nov 29 08:00:04 crc kubenswrapper[4943]: I1129 08:00:04.645647 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm"] Nov 29 08:00:04 crc kubenswrapper[4943]: I1129 08:00:04.656817 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406675-flbnm"] Nov 29 08:00:05 crc kubenswrapper[4943]: I1129 08:00:05.339009 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b482498-960e-427f-a487-36821bcf511e" path="/var/lib/kubelet/pods/1b482498-960e-427f-a487-36821bcf511e/volumes" Nov 29 08:00:32 crc kubenswrapper[4943]: I1129 08:00:32.613331 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:00:32 crc kubenswrapper[4943]: I1129 08:00:32.614734 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:00:54 crc kubenswrapper[4943]: I1129 08:00:54.895158 4943 scope.go:117] "RemoveContainer" containerID="2b49d16071ce569ba7ca1d69b6d59d1a6ff1c6da192906abda0037f88e8d7115" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.156293 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29406721-ljt9k"] Nov 29 08:01:00 crc kubenswrapper[4943]: E1129 08:01:00.157550 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd43e07-d7db-4cb0-8e9a-6bc323cf5815" containerName="collect-profiles" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.157585 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd43e07-d7db-4cb0-8e9a-6bc323cf5815" containerName="collect-profiles" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.157802 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bd43e07-d7db-4cb0-8e9a-6bc323cf5815" containerName="collect-profiles" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.158636 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.170597 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29406721-ljt9k"] Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.344137 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-combined-ca-bundle\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.344335 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-config-data\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.344373 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-fernet-keys\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.344444 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvc46\" (UniqueName: \"kubernetes.io/projected/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-kube-api-access-mvc46\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.445536 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-fernet-keys\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.445941 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvc46\" (UniqueName: \"kubernetes.io/projected/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-kube-api-access-mvc46\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.446029 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-combined-ca-bundle\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.446228 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-config-data\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.451958 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-combined-ca-bundle\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.452023 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-config-data\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.453973 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-fernet-keys\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.469110 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvc46\" (UniqueName: \"kubernetes.io/projected/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-kube-api-access-mvc46\") pod \"keystone-cron-29406721-ljt9k\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.508033 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:00 crc kubenswrapper[4943]: I1129 08:01:00.944116 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29406721-ljt9k"] Nov 29 08:01:00 crc kubenswrapper[4943]: W1129 08:01:00.964482 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded4ca698_c9b4_458e_bf3a_ffba01a3c728.slice/crio-a7e3c913cf73c4cad7f2e9bc769cecb00bda761da45c657be3fb605cfb6365c2 WatchSource:0}: Error finding container a7e3c913cf73c4cad7f2e9bc769cecb00bda761da45c657be3fb605cfb6365c2: Status 404 returned error can't find the container with id a7e3c913cf73c4cad7f2e9bc769cecb00bda761da45c657be3fb605cfb6365c2 Nov 29 08:01:01 crc kubenswrapper[4943]: I1129 08:01:01.611349 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29406721-ljt9k" event={"ID":"ed4ca698-c9b4-458e-bf3a-ffba01a3c728","Type":"ContainerStarted","Data":"764f96fce8d5c6910cd44423fdcbd62b5646cb7b7b337732183e56738e1c2aca"} Nov 29 08:01:01 crc kubenswrapper[4943]: I1129 08:01:01.611743 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29406721-ljt9k" event={"ID":"ed4ca698-c9b4-458e-bf3a-ffba01a3c728","Type":"ContainerStarted","Data":"a7e3c913cf73c4cad7f2e9bc769cecb00bda761da45c657be3fb605cfb6365c2"} Nov 29 08:01:01 crc kubenswrapper[4943]: I1129 08:01:01.640961 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29406721-ljt9k" podStartSLOduration=1.640938023 podStartE2EDuration="1.640938023s" podCreationTimestamp="2025-11-29 08:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 08:01:01.632768588 +0000 UTC m=+5236.562857341" watchObservedRunningTime="2025-11-29 08:01:01.640938023 +0000 UTC m=+5236.571026776" Nov 29 08:01:02 crc kubenswrapper[4943]: I1129 08:01:02.613010 4943 
patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:01:02 crc kubenswrapper[4943]: I1129 08:01:02.613414 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:01:04 crc kubenswrapper[4943]: I1129 08:01:04.643984 4943 generic.go:334] "Generic (PLEG): container finished" podID="ed4ca698-c9b4-458e-bf3a-ffba01a3c728" containerID="764f96fce8d5c6910cd44423fdcbd62b5646cb7b7b337732183e56738e1c2aca" exitCode=0 Nov 29 08:01:04 crc kubenswrapper[4943]: I1129 08:01:04.644072 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29406721-ljt9k" event={"ID":"ed4ca698-c9b4-458e-bf3a-ffba01a3c728","Type":"ContainerDied","Data":"764f96fce8d5c6910cd44423fdcbd62b5646cb7b7b337732183e56738e1c2aca"} Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.235250 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.389074 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-combined-ca-bundle\") pod \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.389221 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvc46\" (UniqueName: \"kubernetes.io/projected/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-kube-api-access-mvc46\") pod \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.389414 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-config-data\") pod \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.389502 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-fernet-keys\") pod \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\" (UID: \"ed4ca698-c9b4-458e-bf3a-ffba01a3c728\") " Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.396387 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ed4ca698-c9b4-458e-bf3a-ffba01a3c728" (UID: "ed4ca698-c9b4-458e-bf3a-ffba01a3c728"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.398139 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-kube-api-access-mvc46" (OuterVolumeSpecName: "kube-api-access-mvc46") pod "ed4ca698-c9b4-458e-bf3a-ffba01a3c728" (UID: "ed4ca698-c9b4-458e-bf3a-ffba01a3c728"). InnerVolumeSpecName "kube-api-access-mvc46". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.430049 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed4ca698-c9b4-458e-bf3a-ffba01a3c728" (UID: "ed4ca698-c9b4-458e-bf3a-ffba01a3c728"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.462188 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-config-data" (OuterVolumeSpecName: "config-data") pod "ed4ca698-c9b4-458e-bf3a-ffba01a3c728" (UID: "ed4ca698-c9b4-458e-bf3a-ffba01a3c728"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.492038 4943 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.492067 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvc46\" (UniqueName: \"kubernetes.io/projected/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-kube-api-access-mvc46\") on node \"crc\" DevicePath \"\"" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.492078 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.492086 4943 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed4ca698-c9b4-458e-bf3a-ffba01a3c728-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.667048 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29406721-ljt9k" event={"ID":"ed4ca698-c9b4-458e-bf3a-ffba01a3c728","Type":"ContainerDied","Data":"a7e3c913cf73c4cad7f2e9bc769cecb00bda761da45c657be3fb605cfb6365c2"} Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.667319 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7e3c913cf73c4cad7f2e9bc769cecb00bda761da45c657be3fb605cfb6365c2" Nov 29 08:01:06 crc kubenswrapper[4943]: I1129 08:01:06.667218 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29406721-ljt9k" Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.612907 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.614756 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.614899 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.615795 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.615965 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" gracePeriod=600 Nov 29 08:01:32 crc kubenswrapper[4943]: E1129 08:01:32.748903 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.935958 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" exitCode=0 Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.936025 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279"} Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.936069 4943 scope.go:117] "RemoveContainer" containerID="dcfea8ec18701b707887e98b98b2d5d4621e274425dd25793a37d38ab2d2b230" Nov 29 08:01:32 crc kubenswrapper[4943]: I1129 08:01:32.936762 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:01:32 crc kubenswrapper[4943]: E1129 08:01:32.937074 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:01:37 crc kubenswrapper[4943]: I1129 08:01:37.052979 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-s7pc5"] Nov 29 08:01:37 crc kubenswrapper[4943]: I1129 08:01:37.061058 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-s7pc5"] Nov 29 08:01:38 crc kubenswrapper[4943]: I1129 08:01:38.017556 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e579c42e-87cb-4c76-9a44-9dcafb37f721" path="/var/lib/kubelet/pods/e579c42e-87cb-4c76-9a44-9dcafb37f721/volumes" Nov 29 08:01:39 crc kubenswrapper[4943]: I1129 08:01:39.028671 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-b898-account-create-update-qn2l8"] Nov 29 08:01:39 crc kubenswrapper[4943]: I1129 08:01:39.039902 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-b898-account-create-update-qn2l8"] Nov 29 08:01:39 crc kubenswrapper[4943]: I1129 08:01:39.341663 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a74bceb-8035-40a6-9a1f-99e60c2f4108" path="/var/lib/kubelet/pods/3a74bceb-8035-40a6-9a1f-99e60c2f4108/volumes" Nov 29 08:01:45 crc kubenswrapper[4943]: I1129 08:01:45.337270 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:01:45 crc kubenswrapper[4943]: E1129 08:01:45.338268 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:01:55 crc kubenswrapper[4943]: I1129 08:01:55.670903 4943 scope.go:117] "RemoveContainer" containerID="f19ad05d5a9202ec37d0e1b21e4c59c9e858c45a1fbfd51bce2f9ece2e77ba3d" Nov 29 08:01:55 crc kubenswrapper[4943]: I1129 08:01:55.710530 4943 scope.go:117] "RemoveContainer" containerID="3d2c28e50cd6af020f8464bd70952f5ecb29d03ddeed1e4f4490b8b9a0bca011" Nov 29 08:02:00 crc kubenswrapper[4943]: I1129 08:02:00.328102 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:02:00 crc kubenswrapper[4943]: E1129 08:02:00.328868 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:02:11 crc kubenswrapper[4943]: I1129 08:02:11.045384 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-gz9x8"] Nov 29 08:02:11 crc kubenswrapper[4943]: I1129 08:02:11.058900 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-gz9x8"] Nov 29 08:02:11 crc kubenswrapper[4943]: I1129 08:02:11.339059 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="62083f23-18f3-425f-bcae-74af545175f2" path="/var/lib/kubelet/pods/62083f23-18f3-425f-bcae-74af545175f2/volumes" Nov 29 08:02:12 crc kubenswrapper[4943]: I1129 08:02:12.327612 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:02:12 crc kubenswrapper[4943]: E1129 08:02:12.327980 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.503546 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hf2lx"] Nov 29 08:02:15 crc kubenswrapper[4943]: E1129 08:02:15.504520 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed4ca698-c9b4-458e-bf3a-ffba01a3c728" containerName="keystone-cron" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.504532 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed4ca698-c9b4-458e-bf3a-ffba01a3c728" containerName="keystone-cron" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.504721 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed4ca698-c9b4-458e-bf3a-ffba01a3c728" containerName="keystone-cron" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.506011 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.527457 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hf2lx"] Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.571886 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-catalog-content\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.571977 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-utilities\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.572008 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndvp7\" (UniqueName: \"kubernetes.io/projected/72265ef1-1e19-457b-9683-62f867638066-kube-api-access-ndvp7\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.674148 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-catalog-content\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " 
pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.674273 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-utilities\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.674305 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndvp7\" (UniqueName: \"kubernetes.io/projected/72265ef1-1e19-457b-9683-62f867638066-kube-api-access-ndvp7\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.674740 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-catalog-content\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.674835 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-utilities\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.696089 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndvp7\" (UniqueName: \"kubernetes.io/projected/72265ef1-1e19-457b-9683-62f867638066-kube-api-access-ndvp7\") pod \"certified-operators-hf2lx\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:15 crc kubenswrapper[4943]: I1129 08:02:15.836822 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:16 crc kubenswrapper[4943]: I1129 08:02:16.497418 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hf2lx"] Nov 29 08:02:17 crc kubenswrapper[4943]: I1129 08:02:17.362994 4943 generic.go:334] "Generic (PLEG): container finished" podID="72265ef1-1e19-457b-9683-62f867638066" containerID="f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294" exitCode=0 Nov 29 08:02:17 crc kubenswrapper[4943]: I1129 08:02:17.363055 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hf2lx" event={"ID":"72265ef1-1e19-457b-9683-62f867638066","Type":"ContainerDied","Data":"f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294"} Nov 29 08:02:17 crc kubenswrapper[4943]: I1129 08:02:17.363507 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hf2lx" event={"ID":"72265ef1-1e19-457b-9683-62f867638066","Type":"ContainerStarted","Data":"20c16acde9f560414517ef79f303db177b016b77df586f5c8bb257bf66c339ec"} Nov 29 08:02:17 crc kubenswrapper[4943]: I1129 08:02:17.367101 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 08:02:19 crc kubenswrapper[4943]: I1129 08:02:19.380408 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hf2lx" event={"ID":"72265ef1-1e19-457b-9683-62f867638066","Type":"ContainerStarted","Data":"8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049"} Nov 29 08:02:20 crc kubenswrapper[4943]: I1129 08:02:20.391210 4943 generic.go:334] "Generic (PLEG): container finished" podID="72265ef1-1e19-457b-9683-62f867638066" containerID="8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049" exitCode=0 Nov 29 08:02:20 crc kubenswrapper[4943]: I1129 08:02:20.391398 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hf2lx" event={"ID":"72265ef1-1e19-457b-9683-62f867638066","Type":"ContainerDied","Data":"8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049"} Nov 29 08:02:22 crc kubenswrapper[4943]: I1129 08:02:22.423016 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hf2lx" event={"ID":"72265ef1-1e19-457b-9683-62f867638066","Type":"ContainerStarted","Data":"531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6"} Nov 29 08:02:22 crc kubenswrapper[4943]: I1129 08:02:22.447503 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hf2lx" podStartSLOduration=3.336629838 podStartE2EDuration="7.447481174s" podCreationTimestamp="2025-11-29 08:02:15 +0000 UTC" firstStartedPulling="2025-11-29 08:02:17.366779435 +0000 UTC m=+5312.296868188" lastFinishedPulling="2025-11-29 08:02:21.477630751 +0000 UTC m=+5316.407719524" observedRunningTime="2025-11-29 08:02:22.443005657 +0000 UTC m=+5317.373094460" watchObservedRunningTime="2025-11-29 08:02:22.447481174 +0000 UTC m=+5317.377569927" Nov 29 08:02:25 crc kubenswrapper[4943]: I1129 08:02:25.836983 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:25 crc kubenswrapper[4943]: I1129 08:02:25.837568 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:25 crc kubenswrapper[4943]: I1129 08:02:25.898142 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:26 crc kubenswrapper[4943]: I1129 08:02:26.507021 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:26 crc kubenswrapper[4943]: I1129 08:02:26.570887 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hf2lx"] Nov 29 08:02:27 crc kubenswrapper[4943]: I1129 08:02:27.328739 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:02:27 crc kubenswrapper[4943]: E1129 08:02:27.329005 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:02:28 crc kubenswrapper[4943]: I1129 08:02:28.470738 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hf2lx" podUID="72265ef1-1e19-457b-9683-62f867638066" containerName="registry-server" containerID="cri-o://531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6" gracePeriod=2 Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.147847 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.240338 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndvp7\" (UniqueName: \"kubernetes.io/projected/72265ef1-1e19-457b-9683-62f867638066-kube-api-access-ndvp7\") pod \"72265ef1-1e19-457b-9683-62f867638066\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.240479 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-catalog-content\") pod \"72265ef1-1e19-457b-9683-62f867638066\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.240535 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-utilities\") pod \"72265ef1-1e19-457b-9683-62f867638066\" (UID: \"72265ef1-1e19-457b-9683-62f867638066\") " Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.241254 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-utilities" (OuterVolumeSpecName: "utilities") pod "72265ef1-1e19-457b-9683-62f867638066" (UID: "72265ef1-1e19-457b-9683-62f867638066"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.246534 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72265ef1-1e19-457b-9683-62f867638066-kube-api-access-ndvp7" (OuterVolumeSpecName: "kube-api-access-ndvp7") pod "72265ef1-1e19-457b-9683-62f867638066" (UID: "72265ef1-1e19-457b-9683-62f867638066"). InnerVolumeSpecName "kube-api-access-ndvp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.293824 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72265ef1-1e19-457b-9683-62f867638066" (UID: "72265ef1-1e19-457b-9683-62f867638066"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.342214 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.342249 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72265ef1-1e19-457b-9683-62f867638066-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.342260 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndvp7\" (UniqueName: \"kubernetes.io/projected/72265ef1-1e19-457b-9683-62f867638066-kube-api-access-ndvp7\") on node \"crc\" DevicePath \"\"" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.482096 4943 generic.go:334] "Generic (PLEG): container finished" podID="72265ef1-1e19-457b-9683-62f867638066" containerID="531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6" exitCode=0 Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.482137 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hf2lx" event={"ID":"72265ef1-1e19-457b-9683-62f867638066","Type":"ContainerDied","Data":"531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6"} Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.482191 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hf2lx" event={"ID":"72265ef1-1e19-457b-9683-62f867638066","Type":"ContainerDied","Data":"20c16acde9f560414517ef79f303db177b016b77df586f5c8bb257bf66c339ec"} Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.482188 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hf2lx" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.482302 4943 scope.go:117] "RemoveContainer" containerID="531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.508821 4943 scope.go:117] "RemoveContainer" containerID="8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.509677 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hf2lx"] Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.518070 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hf2lx"] Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.550100 4943 scope.go:117] "RemoveContainer" containerID="f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.591521 4943 scope.go:117] "RemoveContainer" containerID="531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6" Nov 29 08:02:29 crc kubenswrapper[4943]: E1129 08:02:29.592238 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6\": container with ID starting with 531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6 not found: ID does not exist" containerID="531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.592267 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6"} err="failed to get container status \"531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6\": rpc error: code = NotFound desc = could not find container \"531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6\": container with ID starting with 531f306d3e393390e39a429c55518fe070adc6005634f859ed42d56129eb63d6 not found: ID does not exist" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.592288 4943 scope.go:117] "RemoveContainer" containerID="8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049" Nov 29 08:02:29 crc kubenswrapper[4943]: E1129 08:02:29.592552 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049\": container with ID starting with 8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049 not found: ID does not exist" containerID="8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.592590 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049"} err="failed to get container status \"8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049\": rpc error: code = NotFound desc = could not find container \"8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049\": container with ID starting with 8d3a1a7b9d7b243d186f8590081249b4bcca5877c7ea3d5f17b97ef15434a049 not found: ID does not exist" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.592604 4943 scope.go:117] "RemoveContainer" 
containerID="f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294" Nov 29 08:02:29 crc kubenswrapper[4943]: E1129 08:02:29.592893 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294\": container with ID starting with f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294 not found: ID does not exist" containerID="f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294" Nov 29 08:02:29 crc kubenswrapper[4943]: I1129 08:02:29.592921 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294"} err="failed to get container status \"f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294\": rpc error: code = NotFound desc = could not find container \"f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294\": container with ID starting with f166f7a5f5e558bc69f8e529c4ac48959f9d1d98527aa1066128e6a8b876c294 not found: ID does not exist" Nov 29 08:02:31 crc kubenswrapper[4943]: I1129 08:02:31.339541 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72265ef1-1e19-457b-9683-62f867638066" path="/var/lib/kubelet/pods/72265ef1-1e19-457b-9683-62f867638066/volumes" Nov 29 08:02:41 crc kubenswrapper[4943]: I1129 08:02:41.327831 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:02:41 crc kubenswrapper[4943]: E1129 08:02:41.328715 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:02:54 crc kubenswrapper[4943]: I1129 08:02:54.327375 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:02:54 crc kubenswrapper[4943]: E1129 08:02:54.328077 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:02:55 crc kubenswrapper[4943]: I1129 08:02:55.806306 4943 scope.go:117] "RemoveContainer" containerID="4a58ef8fc7275beff7a920905e2f97780c3ba1a377778c93111b8828abdc6466" Nov 29 08:03:09 crc kubenswrapper[4943]: I1129 08:03:09.328480 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:03:09 crc kubenswrapper[4943]: E1129 08:03:09.329646 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" 
podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.407051 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5c62v"] Nov 29 08:03:10 crc kubenswrapper[4943]: E1129 08:03:10.407711 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72265ef1-1e19-457b-9683-62f867638066" containerName="registry-server" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.407724 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="72265ef1-1e19-457b-9683-62f867638066" containerName="registry-server" Nov 29 08:03:10 crc kubenswrapper[4943]: E1129 08:03:10.407745 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72265ef1-1e19-457b-9683-62f867638066" containerName="extract-utilities" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.407752 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="72265ef1-1e19-457b-9683-62f867638066" containerName="extract-utilities" Nov 29 08:03:10 crc kubenswrapper[4943]: E1129 08:03:10.407770 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72265ef1-1e19-457b-9683-62f867638066" containerName="extract-content" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.407786 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="72265ef1-1e19-457b-9683-62f867638066" containerName="extract-content" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.407967 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="72265ef1-1e19-457b-9683-62f867638066" containerName="registry-server" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.412956 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.420686 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5c62v"] Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.502304 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-catalog-content\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.502353 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsfl9\" (UniqueName: \"kubernetes.io/projected/9be0ebed-aca9-47fd-8230-f9291564b151-kube-api-access-xsfl9\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.502449 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-utilities\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.604898 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-catalog-content\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " 
pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.604949 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsfl9\" (UniqueName: \"kubernetes.io/projected/9be0ebed-aca9-47fd-8230-f9291564b151-kube-api-access-xsfl9\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.605033 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-utilities\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.605598 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-utilities\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.605803 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-catalog-content\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.626943 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsfl9\" (UniqueName: \"kubernetes.io/projected/9be0ebed-aca9-47fd-8230-f9291564b151-kube-api-access-xsfl9\") pod \"redhat-operators-5c62v\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:10 crc kubenswrapper[4943]: I1129 08:03:10.744626 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:11 crc kubenswrapper[4943]: I1129 08:03:11.278935 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5c62v"] Nov 29 08:03:11 crc kubenswrapper[4943]: I1129 08:03:11.868813 4943 generic.go:334] "Generic (PLEG): container finished" podID="9be0ebed-aca9-47fd-8230-f9291564b151" containerID="9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a" exitCode=0 Nov 29 08:03:11 crc kubenswrapper[4943]: I1129 08:03:11.868953 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5c62v" event={"ID":"9be0ebed-aca9-47fd-8230-f9291564b151","Type":"ContainerDied","Data":"9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a"} Nov 29 08:03:11 crc kubenswrapper[4943]: I1129 08:03:11.869141 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5c62v" event={"ID":"9be0ebed-aca9-47fd-8230-f9291564b151","Type":"ContainerStarted","Data":"57dc5f1f4a8b04cbe72d7161aeba45b43273fbc1785291905f82bacb877c5e43"} Nov 29 08:03:12 crc kubenswrapper[4943]: I1129 08:03:12.882118 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5c62v" event={"ID":"9be0ebed-aca9-47fd-8230-f9291564b151","Type":"ContainerStarted","Data":"16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac"} Nov 29 08:03:13 crc kubenswrapper[4943]: I1129 08:03:13.892901 4943 generic.go:334] "Generic (PLEG): container finished" podID="9be0ebed-aca9-47fd-8230-f9291564b151" containerID="16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac" exitCode=0 Nov 29 08:03:13 crc kubenswrapper[4943]: I1129 08:03:13.893237 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5c62v" event={"ID":"9be0ebed-aca9-47fd-8230-f9291564b151","Type":"ContainerDied","Data":"16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac"} Nov 29 08:03:16 crc kubenswrapper[4943]: I1129 08:03:16.926972 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5c62v" event={"ID":"9be0ebed-aca9-47fd-8230-f9291564b151","Type":"ContainerStarted","Data":"222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec"} Nov 29 08:03:16 crc kubenswrapper[4943]: I1129 08:03:16.959878 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5c62v" podStartSLOduration=3.119056365 podStartE2EDuration="6.959852254s" podCreationTimestamp="2025-11-29 08:03:10 +0000 UTC" firstStartedPulling="2025-11-29 08:03:11.870521789 +0000 UTC m=+5366.800610542" lastFinishedPulling="2025-11-29 08:03:15.711317688 +0000 UTC m=+5370.641406431" observedRunningTime="2025-11-29 08:03:16.94838581 +0000 UTC m=+5371.878474573" watchObservedRunningTime="2025-11-29 08:03:16.959852254 +0000 UTC m=+5371.889941007" Nov 29 08:03:20 crc kubenswrapper[4943]: I1129 08:03:20.745340 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:20 crc kubenswrapper[4943]: I1129 08:03:20.747874 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:21 crc kubenswrapper[4943]: I1129 08:03:21.328298 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 
08:03:21 crc kubenswrapper[4943]: E1129 08:03:21.328662 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:03:21 crc kubenswrapper[4943]: I1129 08:03:21.811594 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5c62v" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="registry-server" probeResult="failure" output=< Nov 29 08:03:21 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 08:03:21 crc kubenswrapper[4943]: > Nov 29 08:03:30 crc kubenswrapper[4943]: I1129 08:03:30.794213 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:30 crc kubenswrapper[4943]: I1129 08:03:30.840629 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:31 crc kubenswrapper[4943]: I1129 08:03:31.032314 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5c62v"] Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.055658 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5c62v" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="registry-server" containerID="cri-o://222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec" gracePeriod=2 Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.577105 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.655102 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-utilities\") pod \"9be0ebed-aca9-47fd-8230-f9291564b151\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.655156 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-catalog-content\") pod \"9be0ebed-aca9-47fd-8230-f9291564b151\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.655326 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsfl9\" (UniqueName: \"kubernetes.io/projected/9be0ebed-aca9-47fd-8230-f9291564b151-kube-api-access-xsfl9\") pod \"9be0ebed-aca9-47fd-8230-f9291564b151\" (UID: \"9be0ebed-aca9-47fd-8230-f9291564b151\") " Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.656143 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-utilities" (OuterVolumeSpecName: "utilities") pod "9be0ebed-aca9-47fd-8230-f9291564b151" (UID: "9be0ebed-aca9-47fd-8230-f9291564b151"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.661048 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9be0ebed-aca9-47fd-8230-f9291564b151-kube-api-access-xsfl9" (OuterVolumeSpecName: "kube-api-access-xsfl9") pod "9be0ebed-aca9-47fd-8230-f9291564b151" (UID: "9be0ebed-aca9-47fd-8230-f9291564b151"). InnerVolumeSpecName "kube-api-access-xsfl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.758022 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.758257 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsfl9\" (UniqueName: \"kubernetes.io/projected/9be0ebed-aca9-47fd-8230-f9291564b151-kube-api-access-xsfl9\") on node \"crc\" DevicePath \"\"" Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.759464 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9be0ebed-aca9-47fd-8230-f9291564b151" (UID: "9be0ebed-aca9-47fd-8230-f9291564b151"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:03:32 crc kubenswrapper[4943]: I1129 08:03:32.860358 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be0ebed-aca9-47fd-8230-f9291564b151-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.074183 4943 generic.go:334] "Generic (PLEG): container finished" podID="9be0ebed-aca9-47fd-8230-f9291564b151" containerID="222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec" exitCode=0 Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.074239 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5c62v" event={"ID":"9be0ebed-aca9-47fd-8230-f9291564b151","Type":"ContainerDied","Data":"222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec"} Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.074281 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5c62v" event={"ID":"9be0ebed-aca9-47fd-8230-f9291564b151","Type":"ContainerDied","Data":"57dc5f1f4a8b04cbe72d7161aeba45b43273fbc1785291905f82bacb877c5e43"} Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.074297 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5c62v" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.074313 4943 scope.go:117] "RemoveContainer" containerID="222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.118145 4943 scope.go:117] "RemoveContainer" containerID="16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.122954 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5c62v"] Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.137819 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5c62v"] Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.145366 4943 scope.go:117] "RemoveContainer" containerID="9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.205954 4943 scope.go:117] "RemoveContainer" containerID="222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec" Nov 29 08:03:33 crc kubenswrapper[4943]: E1129 08:03:33.206639 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec\": container with ID starting with 222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec not found: ID does not exist" containerID="222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.206693 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec"} err="failed to get container status \"222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec\": rpc error: code = NotFound desc = could not find container \"222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec\": container with ID starting with 222cb9f3d5dfc361a4b01f4763f4edb039ef04b596b3a0ddfab04f7c40106dec not found: ID does not exist" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.206725 4943 scope.go:117] "RemoveContainer" containerID="16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac" Nov 29 08:03:33 crc kubenswrapper[4943]: E1129 08:03:33.207066 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac\": container with ID starting with 16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac not found: ID does not exist" containerID="16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.207110 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac"} err="failed to get container status \"16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac\": rpc error: code = NotFound desc = could not find container \"16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac\": container with ID starting with 16a4a84c911498269e89b9b004bd28c7249d9e9cc9510fbdccbf9619e99feaac not found: ID does not exist" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.207141 4943 scope.go:117] "RemoveContainer" 
containerID="9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a" Nov 29 08:03:33 crc kubenswrapper[4943]: E1129 08:03:33.207425 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a\": container with ID starting with 9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a not found: ID does not exist" containerID="9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.207456 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a"} err="failed to get container status \"9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a\": rpc error: code = NotFound desc = could not find container \"9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a\": container with ID starting with 9733856da702a083e37770060a687cede386808a83a23e22d5db21e1b6c6648a not found: ID does not exist" Nov 29 08:03:33 crc kubenswrapper[4943]: I1129 08:03:33.343642 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" path="/var/lib/kubelet/pods/9be0ebed-aca9-47fd-8230-f9291564b151/volumes" Nov 29 08:03:36 crc kubenswrapper[4943]: I1129 08:03:36.327333 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:03:36 crc kubenswrapper[4943]: E1129 08:03:36.328351 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:03:51 crc kubenswrapper[4943]: I1129 08:03:51.328061 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:03:51 crc kubenswrapper[4943]: E1129 08:03:51.328819 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:04:05 crc kubenswrapper[4943]: I1129 08:04:05.336294 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:04:05 crc kubenswrapper[4943]: E1129 08:04:05.337440 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:04:20 crc kubenswrapper[4943]: I1129 08:04:20.327445 4943 scope.go:117] "RemoveContainer" 
containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:04:20 crc kubenswrapper[4943]: E1129 08:04:20.328358 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:04:34 crc kubenswrapper[4943]: I1129 08:04:34.328146 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:04:34 crc kubenswrapper[4943]: E1129 08:04:34.329343 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:04:47 crc kubenswrapper[4943]: I1129 08:04:47.327226 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:04:47 crc kubenswrapper[4943]: E1129 08:04:47.327995 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:04:59 crc kubenswrapper[4943]: I1129 08:04:59.328297 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:04:59 crc kubenswrapper[4943]: E1129 08:04:59.329445 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:05:14 crc kubenswrapper[4943]: I1129 08:05:14.327663 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:05:14 crc kubenswrapper[4943]: E1129 08:05:14.328472 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:05:27 crc kubenswrapper[4943]: I1129 08:05:27.327742 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:05:27 crc kubenswrapper[4943]: E1129 08:05:27.328680 4943 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:05:42 crc kubenswrapper[4943]: I1129 08:05:42.327217 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:05:42 crc kubenswrapper[4943]: E1129 08:05:42.328005 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:05:55 crc kubenswrapper[4943]: I1129 08:05:55.338490 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:05:55 crc kubenswrapper[4943]: E1129 08:05:55.339305 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:06:10 crc kubenswrapper[4943]: I1129 08:06:10.327577 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:06:10 crc kubenswrapper[4943]: E1129 08:06:10.328376 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:06:22 crc kubenswrapper[4943]: I1129 08:06:22.327780 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:06:22 crc kubenswrapper[4943]: E1129 08:06:22.329425 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:06:37 crc kubenswrapper[4943]: I1129 08:06:37.328032 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:06:37 crc kubenswrapper[4943]: I1129 08:06:37.698167 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" 
event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"35be6d8bd158487e792a0e62a80719dd61d7b8ffe6b15cd2b509b50edc4d91e8"} Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.851930 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mm6h6"] Nov 29 08:08:24 crc kubenswrapper[4943]: E1129 08:08:24.853324 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="registry-server" Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.853356 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="registry-server" Nov 29 08:08:24 crc kubenswrapper[4943]: E1129 08:08:24.853393 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="extract-content" Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.853406 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="extract-content" Nov 29 08:08:24 crc kubenswrapper[4943]: E1129 08:08:24.853439 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="extract-utilities" Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.853455 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="extract-utilities" Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.853946 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="9be0ebed-aca9-47fd-8230-f9291564b151" containerName="registry-server" Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.856937 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.878521 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mm6h6"] Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.945403 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1f699ce-49f8-415d-a493-f43d14ab9f53-utilities\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.945750 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89c9h\" (UniqueName: \"kubernetes.io/projected/b1f699ce-49f8-415d-a493-f43d14ab9f53-kube-api-access-89c9h\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:24 crc kubenswrapper[4943]: I1129 08:08:24.945902 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1f699ce-49f8-415d-a493-f43d14ab9f53-catalog-content\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:25 crc kubenswrapper[4943]: I1129 08:08:25.047841 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1f699ce-49f8-415d-a493-f43d14ab9f53-utilities\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:25 crc kubenswrapper[4943]: I1129 08:08:25.047982 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89c9h\" (UniqueName: \"kubernetes.io/projected/b1f699ce-49f8-415d-a493-f43d14ab9f53-kube-api-access-89c9h\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:25 crc kubenswrapper[4943]: I1129 08:08:25.048028 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1f699ce-49f8-415d-a493-f43d14ab9f53-catalog-content\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:25 crc kubenswrapper[4943]: I1129 08:08:25.048364 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1f699ce-49f8-415d-a493-f43d14ab9f53-utilities\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:25 crc kubenswrapper[4943]: I1129 08:08:25.048416 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1f699ce-49f8-415d-a493-f43d14ab9f53-catalog-content\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:25 crc kubenswrapper[4943]: I1129 08:08:25.076924 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-89c9h\" (UniqueName: \"kubernetes.io/projected/b1f699ce-49f8-415d-a493-f43d14ab9f53-kube-api-access-89c9h\") pod \"community-operators-mm6h6\" (UID: \"b1f699ce-49f8-415d-a493-f43d14ab9f53\") " pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:25 crc kubenswrapper[4943]: I1129 08:08:25.195759 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mm6h6" Nov 29 08:08:25 crc kubenswrapper[4943]: I1129 08:08:25.767052 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mm6h6"] Nov 29 08:08:26 crc kubenswrapper[4943]: I1129 08:08:26.661964 4943 generic.go:334] "Generic (PLEG): container finished" podID="b1f699ce-49f8-415d-a493-f43d14ab9f53" containerID="34be830ba90e0ff79758a80eff4eb297c4b2261da9903f561d2beccf19550d32" exitCode=0 Nov 29 08:08:26 crc kubenswrapper[4943]: I1129 08:08:26.662062 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mm6h6" event={"ID":"b1f699ce-49f8-415d-a493-f43d14ab9f53","Type":"ContainerDied","Data":"34be830ba90e0ff79758a80eff4eb297c4b2261da9903f561d2beccf19550d32"} Nov 29 08:08:26 crc kubenswrapper[4943]: I1129 08:08:26.662519 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mm6h6" event={"ID":"b1f699ce-49f8-415d-a493-f43d14ab9f53","Type":"ContainerStarted","Data":"c24a854e763b18fa6f7db96fcac8fbc5fc2870ca7cdcd3b60aa1441df6de25fa"} Nov 29 08:08:26 crc kubenswrapper[4943]: I1129 08:08:26.663849 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.645642 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fcpt8"] Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.650027 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.658241 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fcpt8"] Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.799203 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-utilities\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.799436 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-catalog-content\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.799633 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94rxg\" (UniqueName: \"kubernetes.io/projected/1815d314-bb1f-4630-b111-949ca3faf350-kube-api-access-94rxg\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.901059 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-utilities\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.901157 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-catalog-content\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.901215 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94rxg\" (UniqueName: \"kubernetes.io/projected/1815d314-bb1f-4630-b111-949ca3faf350-kube-api-access-94rxg\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.901600 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-utilities\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.901763 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-catalog-content\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.924780 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-94rxg\" (UniqueName: \"kubernetes.io/projected/1815d314-bb1f-4630-b111-949ca3faf350-kube-api-access-94rxg\") pod \"redhat-marketplace-fcpt8\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:27 crc kubenswrapper[4943]: I1129 08:08:27.986134 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:08:28 crc kubenswrapper[4943]: I1129 08:08:28.529538 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fcpt8"] Nov 29 08:08:28 crc kubenswrapper[4943]: W1129 08:08:28.544758 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1815d314_bb1f_4630_b111_949ca3faf350.slice/crio-d3ba5b2dbbb7ca9f96f79d946e8a03d546262f2d80f4eee61644285d584eedf7 WatchSource:0}: Error finding container d3ba5b2dbbb7ca9f96f79d946e8a03d546262f2d80f4eee61644285d584eedf7: Status 404 returned error can't find the container with id d3ba5b2dbbb7ca9f96f79d946e8a03d546262f2d80f4eee61644285d584eedf7 Nov 29 08:08:28 crc kubenswrapper[4943]: I1129 08:08:28.690141 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fcpt8" event={"ID":"1815d314-bb1f-4630-b111-949ca3faf350","Type":"ContainerStarted","Data":"d3ba5b2dbbb7ca9f96f79d946e8a03d546262f2d80f4eee61644285d584eedf7"} Nov 29 08:08:29 crc kubenswrapper[4943]: I1129 08:08:29.701097 4943 generic.go:334] "Generic (PLEG): container finished" podID="1815d314-bb1f-4630-b111-949ca3faf350" containerID="de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411" exitCode=0 Nov 29 08:08:29 crc kubenswrapper[4943]: I1129 08:08:29.701412 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fcpt8" event={"ID":"1815d314-bb1f-4630-b111-949ca3faf350","Type":"ContainerDied","Data":"de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411"} Nov 29 08:09:02 crc kubenswrapper[4943]: I1129 08:09:02.613138 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:09:02 crc kubenswrapper[4943]: I1129 08:09:02.613735 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:09:08 crc kubenswrapper[4943]: I1129 08:09:08.086835 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fcpt8" event={"ID":"1815d314-bb1f-4630-b111-949ca3faf350","Type":"ContainerStarted","Data":"0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab"} Nov 29 08:09:09 crc kubenswrapper[4943]: I1129 08:09:09.095925 4943 generic.go:334] "Generic (PLEG): container finished" podID="1815d314-bb1f-4630-b111-949ca3faf350" containerID="0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab" exitCode=0 Nov 29 08:09:09 crc kubenswrapper[4943]: I1129 08:09:09.096316 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-fcpt8" event={"ID":"1815d314-bb1f-4630-b111-949ca3faf350","Type":"ContainerDied","Data":"0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab"} Nov 29 08:09:18 crc kubenswrapper[4943]: I1129 08:09:18.192231 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fcpt8" event={"ID":"1815d314-bb1f-4630-b111-949ca3faf350","Type":"ContainerStarted","Data":"0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63"} Nov 29 08:09:18 crc kubenswrapper[4943]: I1129 08:09:18.210205 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fcpt8" podStartSLOduration=3.142456873 podStartE2EDuration="51.210190067s" podCreationTimestamp="2025-11-29 08:08:27 +0000 UTC" firstStartedPulling="2025-11-29 08:08:29.704152601 +0000 UTC m=+5684.634241354" lastFinishedPulling="2025-11-29 08:09:17.771885795 +0000 UTC m=+5732.701974548" observedRunningTime="2025-11-29 08:09:18.205989136 +0000 UTC m=+5733.136077879" watchObservedRunningTime="2025-11-29 08:09:18.210190067 +0000 UTC m=+5733.140278820" Nov 29 08:09:27 crc kubenswrapper[4943]: I1129 08:09:27.986443 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:09:27 crc kubenswrapper[4943]: I1129 08:09:27.988157 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:09:28 crc kubenswrapper[4943]: I1129 08:09:28.053622 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:09:28 crc kubenswrapper[4943]: I1129 08:09:28.353203 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:09:28 crc kubenswrapper[4943]: I1129 08:09:28.870994 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fcpt8"] Nov 29 08:09:30 crc kubenswrapper[4943]: I1129 08:09:30.294468 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fcpt8" podUID="1815d314-bb1f-4630-b111-949ca3faf350" containerName="registry-server" containerID="cri-o://0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63" gracePeriod=2 Nov 29 08:09:30 crc kubenswrapper[4943]: I1129 08:09:30.972127 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.080679 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-catalog-content\") pod \"1815d314-bb1f-4630-b111-949ca3faf350\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.080798 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-utilities\") pod \"1815d314-bb1f-4630-b111-949ca3faf350\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.080873 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94rxg\" (UniqueName: \"kubernetes.io/projected/1815d314-bb1f-4630-b111-949ca3faf350-kube-api-access-94rxg\") pod \"1815d314-bb1f-4630-b111-949ca3faf350\" (UID: \"1815d314-bb1f-4630-b111-949ca3faf350\") " Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.081815 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-utilities" (OuterVolumeSpecName: "utilities") pod "1815d314-bb1f-4630-b111-949ca3faf350" (UID: "1815d314-bb1f-4630-b111-949ca3faf350"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.089240 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1815d314-bb1f-4630-b111-949ca3faf350-kube-api-access-94rxg" (OuterVolumeSpecName: "kube-api-access-94rxg") pod "1815d314-bb1f-4630-b111-949ca3faf350" (UID: "1815d314-bb1f-4630-b111-949ca3faf350"). InnerVolumeSpecName "kube-api-access-94rxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.099491 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1815d314-bb1f-4630-b111-949ca3faf350" (UID: "1815d314-bb1f-4630-b111-949ca3faf350"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.183089 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94rxg\" (UniqueName: \"kubernetes.io/projected/1815d314-bb1f-4630-b111-949ca3faf350-kube-api-access-94rxg\") on node \"crc\" DevicePath \"\"" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.183128 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.183138 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1815d314-bb1f-4630-b111-949ca3faf350-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.309536 4943 generic.go:334] "Generic (PLEG): container finished" podID="1815d314-bb1f-4630-b111-949ca3faf350" containerID="0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63" exitCode=0 Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.309649 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fcpt8" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.309629 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fcpt8" event={"ID":"1815d314-bb1f-4630-b111-949ca3faf350","Type":"ContainerDied","Data":"0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63"} Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.309855 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fcpt8" event={"ID":"1815d314-bb1f-4630-b111-949ca3faf350","Type":"ContainerDied","Data":"d3ba5b2dbbb7ca9f96f79d946e8a03d546262f2d80f4eee61644285d584eedf7"} Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.309892 4943 scope.go:117] "RemoveContainer" containerID="0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.336804 4943 scope.go:117] "RemoveContainer" containerID="0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.365144 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fcpt8"] Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.373350 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fcpt8"] Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.374201 4943 scope.go:117] "RemoveContainer" containerID="de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.442144 4943 scope.go:117] "RemoveContainer" containerID="0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63" Nov 29 08:09:31 crc kubenswrapper[4943]: E1129 08:09:31.443076 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63\": container with ID starting with 0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63 not found: ID does not exist" containerID="0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.443169 4943 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63"} err="failed to get container status \"0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63\": rpc error: code = NotFound desc = could not find container \"0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63\": container with ID starting with 0adf052277474609b9e96b6c40332837d1ebf47902f2e65711cf43ee0da4ed63 not found: ID does not exist" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.443277 4943 scope.go:117] "RemoveContainer" containerID="0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab" Nov 29 08:09:31 crc kubenswrapper[4943]: E1129 08:09:31.443692 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab\": container with ID starting with 0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab not found: ID does not exist" containerID="0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.443736 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab"} err="failed to get container status \"0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab\": rpc error: code = NotFound desc = could not find container \"0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab\": container with ID starting with 0c76067b5ea976470bd468296c977156472095ad87e58dcb46e3be26e75584ab not found: ID does not exist" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.443765 4943 scope.go:117] "RemoveContainer" containerID="de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411" Nov 29 08:09:31 crc kubenswrapper[4943]: E1129 08:09:31.444101 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411\": container with ID starting with de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411 not found: ID does not exist" containerID="de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411" Nov 29 08:09:31 crc kubenswrapper[4943]: I1129 08:09:31.444129 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411"} err="failed to get container status \"de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411\": rpc error: code = NotFound desc = could not find container \"de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411\": container with ID starting with de74386178b58f007b074092e16124dc299adfc2df53cf98208e17877df5c411 not found: ID does not exist" Nov 29 08:09:32 crc kubenswrapper[4943]: I1129 08:09:32.613828 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:09:32 crc kubenswrapper[4943]: I1129 08:09:32.614210 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" 
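Note: the RemoveContainer / "ContainerStatus from runtime service failed" (NotFound) / "DeleteContainer returned error" triples above are benign. By the time the kubelet retries the delete, CRI-O has already removed the container, so the status lookup fails with gRPC NotFound and the kubelet simply moves on. A minimal sketch of that tolerate-NotFound pattern, for illustration only (the deleteFn parameter is hypothetical, not the kubelet's actual signature):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // deleteIgnoreNotFound runs a delete call and treats gRPC NotFound as
    // success: a container that is already gone is exactly the desired state.
    func deleteIgnoreNotFound(deleteFn func() error) error {
        if err := deleteFn(); err != nil && status.Code(err) != codes.NotFound {
            return err // a real failure, worth surfacing
        }
        return nil // deleted now, or already deleted by someone else
    }

    func main() {
        // Simulate the race seen in the log: the runtime removed it first.
        alreadyGone := func() error {
            return status.Error(codes.NotFound, "could not find container")
        }
        fmt.Println(deleteIgnoreNotFound(alreadyGone)) // prints <nil>
    }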
podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:09:33 crc kubenswrapper[4943]: I1129 08:09:33.339138 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1815d314-bb1f-4630-b111-949ca3faf350" path="/var/lib/kubelet/pods/1815d314-bb1f-4630-b111-949ca3faf350/volumes" Nov 29 08:09:50 crc kubenswrapper[4943]: E1129 08:09:50.087121 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage2786621207/1\": happened during read: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 29 08:09:50 crc kubenswrapper[4943]: E1129 08:09:50.087859 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-89c9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-mm6h6_openshift-marketplace(b1f699ce-49f8-415d-a493-f43d14ab9f53): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage2786621207/1\": happened during read: context canceled" logger="UnhandledError" Nov 29 08:09:50 crc kubenswrapper[4943]: E1129 08:09:50.089088 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \\\"/var/tmp/container_images_storage2786621207/1\\\": happened during read: context canceled\"" pod="openshift-marketplace/community-operators-mm6h6" podUID="b1f699ce-49f8-415d-a493-f43d14ab9f53" Nov 29 08:09:50 crc kubenswrapper[4943]: E1129 08:09:50.511008 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with 
ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-mm6h6" podUID="b1f699ce-49f8-415d-a493-f43d14ab9f53" Nov 29 08:10:02 crc kubenswrapper[4943]: I1129 08:10:02.614052 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:10:02 crc kubenswrapper[4943]: I1129 08:10:02.615321 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:10:02 crc kubenswrapper[4943]: I1129 08:10:02.615375 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 08:10:02 crc kubenswrapper[4943]: I1129 08:10:02.616401 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"35be6d8bd158487e792a0e62a80719dd61d7b8ffe6b15cd2b509b50edc4d91e8"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 08:10:02 crc kubenswrapper[4943]: I1129 08:10:02.616469 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://35be6d8bd158487e792a0e62a80719dd61d7b8ffe6b15cd2b509b50edc4d91e8" gracePeriod=600 Nov 29 08:10:03 crc kubenswrapper[4943]: I1129 08:10:03.634675 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="35be6d8bd158487e792a0e62a80719dd61d7b8ffe6b15cd2b509b50edc4d91e8" exitCode=0 Nov 29 08:10:03 crc kubenswrapper[4943]: I1129 08:10:03.634761 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"35be6d8bd158487e792a0e62a80719dd61d7b8ffe6b15cd2b509b50edc4d91e8"} Nov 29 08:10:03 crc kubenswrapper[4943]: I1129 08:10:03.636273 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252"} Nov 29 08:10:03 crc kubenswrapper[4943]: I1129 08:10:03.636364 4943 scope.go:117] "RemoveContainer" containerID="2c33bb948a284682563394327c24d4478033a5c938a7175d6fbc23b5d5b79279" Nov 29 08:10:18 crc kubenswrapper[4943]: E1129 08:10:18.231622 4943 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://registry.redhat.io/redhat/community-operator-index:v4.18: Get 
\"https://registry.redhat.io/auth/realms/rhcc/protocol/redhat-docker-v2/auth?account=%7Cuhc-pool-e4a64f1e-752e-4ea6-a5c9-20066c148871&scope=repository%3Aredhat%2Fcommunity-operator-index%3Apull&service=docker-registry\": net/http: TLS handshake timeout" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 29 08:10:18 crc kubenswrapper[4943]: E1129 08:10:18.233303 4943 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-89c9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-mm6h6_openshift-marketplace(b1f699ce-49f8-415d-a493-f43d14ab9f53): ErrImagePull: initializing source docker://registry.redhat.io/redhat/community-operator-index:v4.18: Get \"https://registry.redhat.io/auth/realms/rhcc/protocol/redhat-docker-v2/auth?account=%7Cuhc-pool-e4a64f1e-752e-4ea6-a5c9-20066c148871&scope=repository%3Aredhat%2Fcommunity-operator-index%3Apull&service=docker-registry\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 29 08:10:18 crc kubenswrapper[4943]: E1129 08:10:18.234644 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"initializing source docker://registry.redhat.io/redhat/community-operator-index:v4.18: Get \\\"https://registry.redhat.io/auth/realms/rhcc/protocol/redhat-docker-v2/auth?account=%7Cuhc-pool-e4a64f1e-752e-4ea6-a5c9-20066c148871&scope=repository%3Aredhat%2Fcommunity-operator-index%3Apull&service=docker-registry\\\": net/http: TLS handshake timeout\"" pod="openshift-marketplace/community-operators-mm6h6" podUID="b1f699ce-49f8-415d-a493-f43d14ab9f53" Nov 29 08:10:31 crc kubenswrapper[4943]: E1129 08:10:31.329884 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-mm6h6" podUID="b1f699ce-49f8-415d-a493-f43d14ab9f53" Nov 29 08:10:57 crc 
Nov 29 08:10:57 crc kubenswrapper[4943]: I1129 08:10:57.162201 4943 generic.go:334] "Generic (PLEG): container finished" podID="b1f699ce-49f8-415d-a493-f43d14ab9f53" containerID="02ca5ca429d53e025d65d1f555e24103791a90f6a08feec8788b7e71c008b128" exitCode=0
Nov 29 08:10:57 crc kubenswrapper[4943]: I1129 08:10:57.162286 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mm6h6" event={"ID":"b1f699ce-49f8-415d-a493-f43d14ab9f53","Type":"ContainerDied","Data":"02ca5ca429d53e025d65d1f555e24103791a90f6a08feec8788b7e71c008b128"}
Nov 29 08:10:58 crc kubenswrapper[4943]: I1129 08:10:58.173508 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mm6h6" event={"ID":"b1f699ce-49f8-415d-a493-f43d14ab9f53","Type":"ContainerStarted","Data":"c568d5f7500d791d1ac1427cfbc7cfab92d2a9192fae1f7b8d2148f55b554798"}
Nov 29 08:10:58 crc kubenswrapper[4943]: I1129 08:10:58.191792 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mm6h6" podStartSLOduration=3.126705245 podStartE2EDuration="2m34.191775077s" podCreationTimestamp="2025-11-29 08:08:24 +0000 UTC" firstStartedPulling="2025-11-29 08:08:26.663596649 +0000 UTC m=+5681.593685402" lastFinishedPulling="2025-11-29 08:10:57.728666491 +0000 UTC m=+5832.658755234" observedRunningTime="2025-11-29 08:10:58.189814639 +0000 UTC m=+5833.119903402" watchObservedRunningTime="2025-11-29 08:10:58.191775077 +0000 UTC m=+5833.121863830"
Nov 29 08:11:05 crc kubenswrapper[4943]: I1129 08:11:05.196826 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mm6h6"
Nov 29 08:11:05 crc kubenswrapper[4943]: I1129 08:11:05.197573 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mm6h6"
Nov 29 08:11:05 crc kubenswrapper[4943]: I1129 08:11:05.244448 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mm6h6"
Nov 29 08:11:06 crc kubenswrapper[4943]: I1129 08:11:06.301722 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mm6h6"
Nov 29 08:11:06 crc kubenswrapper[4943]: I1129 08:11:06.370511 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mm6h6"]
Nov 29 08:11:06 crc kubenswrapper[4943]: I1129 08:11:06.424873 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hqj99"]
Nov 29 08:11:06 crc kubenswrapper[4943]: I1129 08:11:06.425668 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hqj99" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="registry-server" containerID="cri-o://df493a34249296dfb8d0ffdb0a33e5e403eb9ca415707f05a0ab5c785d236d0d" gracePeriod=2
Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.258907 4943 generic.go:334] "Generic (PLEG): container finished" podID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerID="df493a34249296dfb8d0ffdb0a33e5e403eb9ca415707f05a0ab5c785d236d0d" exitCode=0
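Note: the startup-latency entry above encodes a simple relation: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the time spent pulling images (lastFinishedPulling minus firstStartedPulling). A quick check in Go using the wall-clock values copied from the entry; the residual difference in the 8th decimal against the logged 3.126705245 comes from the monotonic "m=+..." readings the kubelet actually computes with:

    package main

    import (
        "fmt"
        "time"
    )

    const layout = "2006-01-02 15:04:05 -0700 MST" // Go accepts the fractional seconds on parse

    func mustParse(s string) time.Time {
        t, err := time.Parse(layout, s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2025-11-29 08:08:24 +0000 UTC")
        running := mustParse("2025-11-29 08:10:58.191775077 +0000 UTC")
        firstPull := mustParse("2025-11-29 08:08:26.663596649 +0000 UTC")
        lastPull := mustParse("2025-11-29 08:10:57.728666491 +0000 UTC")

        e2e := running.Sub(created)        // 2m34.191775077s == podStartE2EDuration
        pulling := lastPull.Sub(firstPull) // 2m31.065069842s spent pulling the index image
        fmt.Println(e2e, pulling, e2e-pulling) // e2e-pulling ≈ 3.126705s == podStartSLOduration
    }

In other words, almost all of the 2m34s startup was the image pull; the pod itself started in about 3 seconds.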
event={"ID":"2c0bfd16-93aa-4f3f-882a-6333420ed038","Type":"ContainerDied","Data":"df493a34249296dfb8d0ffdb0a33e5e403eb9ca415707f05a0ab5c785d236d0d"} Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.260341 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hqj99" event={"ID":"2c0bfd16-93aa-4f3f-882a-6333420ed038","Type":"ContainerDied","Data":"df3b554030836ee54a08729cdf585c87b81c37c2f75f083b9fdc513e265c9f0e"} Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.260351 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df3b554030836ee54a08729cdf585c87b81c37c2f75f083b9fdc513e265c9f0e" Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.346264 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hqj99" Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.353976 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-utilities\") pod \"2c0bfd16-93aa-4f3f-882a-6333420ed038\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.354025 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-catalog-content\") pod \"2c0bfd16-93aa-4f3f-882a-6333420ed038\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.354106 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5xgg\" (UniqueName: \"kubernetes.io/projected/2c0bfd16-93aa-4f3f-882a-6333420ed038-kube-api-access-s5xgg\") pod \"2c0bfd16-93aa-4f3f-882a-6333420ed038\" (UID: \"2c0bfd16-93aa-4f3f-882a-6333420ed038\") " Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.355031 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-utilities" (OuterVolumeSpecName: "utilities") pod "2c0bfd16-93aa-4f3f-882a-6333420ed038" (UID: "2c0bfd16-93aa-4f3f-882a-6333420ed038"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.355664 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.359464 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c0bfd16-93aa-4f3f-882a-6333420ed038-kube-api-access-s5xgg" (OuterVolumeSpecName: "kube-api-access-s5xgg") pod "2c0bfd16-93aa-4f3f-882a-6333420ed038" (UID: "2c0bfd16-93aa-4f3f-882a-6333420ed038"). InnerVolumeSpecName "kube-api-access-s5xgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.416698 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c0bfd16-93aa-4f3f-882a-6333420ed038" (UID: "2c0bfd16-93aa-4f3f-882a-6333420ed038"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.456899 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c0bfd16-93aa-4f3f-882a-6333420ed038-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:11:07 crc kubenswrapper[4943]: I1129 08:11:07.457228 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5xgg\" (UniqueName: \"kubernetes.io/projected/2c0bfd16-93aa-4f3f-882a-6333420ed038-kube-api-access-s5xgg\") on node \"crc\" DevicePath \"\"" Nov 29 08:11:08 crc kubenswrapper[4943]: I1129 08:11:08.268616 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hqj99" Nov 29 08:11:08 crc kubenswrapper[4943]: I1129 08:11:08.305109 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hqj99"] Nov 29 08:11:08 crc kubenswrapper[4943]: I1129 08:11:08.314696 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hqj99"] Nov 29 08:11:09 crc kubenswrapper[4943]: I1129 08:11:09.345633 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" path="/var/lib/kubelet/pods/2c0bfd16-93aa-4f3f-882a-6333420ed038/volumes" Nov 29 08:11:56 crc kubenswrapper[4943]: I1129 08:11:56.099058 4943 scope.go:117] "RemoveContainer" containerID="58ec7b3a955edf867629f34068146db24deba884d46981263c3bf36aabf9d42d" Nov 29 08:11:56 crc kubenswrapper[4943]: I1129 08:11:56.143335 4943 scope.go:117] "RemoveContainer" containerID="df493a34249296dfb8d0ffdb0a33e5e403eb9ca415707f05a0ab5c785d236d0d" Nov 29 08:11:56 crc kubenswrapper[4943]: I1129 08:11:56.181092 4943 scope.go:117] "RemoveContainer" containerID="e90bf521231c4a745f16d2330c5c1c34ab4eb1574ba61bc6b55dc12b9dd01ab1" Nov 29 08:12:32 crc kubenswrapper[4943]: I1129 08:12:32.613389 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:12:32 crc kubenswrapper[4943]: I1129 08:12:32.614065 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.768828 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4dggt"] Nov 29 08:12:52 crc kubenswrapper[4943]: E1129 08:12:52.769758 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="registry-server" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.769771 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="registry-server" Nov 29 08:12:52 crc kubenswrapper[4943]: E1129 08:12:52.769789 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="extract-utilities" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.769795 4943 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="extract-utilities" Nov 29 08:12:52 crc kubenswrapper[4943]: E1129 08:12:52.769808 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1815d314-bb1f-4630-b111-949ca3faf350" containerName="extract-content" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.769814 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1815d314-bb1f-4630-b111-949ca3faf350" containerName="extract-content" Nov 29 08:12:52 crc kubenswrapper[4943]: E1129 08:12:52.769829 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="extract-content" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.769834 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="extract-content" Nov 29 08:12:52 crc kubenswrapper[4943]: E1129 08:12:52.769854 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1815d314-bb1f-4630-b111-949ca3faf350" containerName="extract-utilities" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.769860 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1815d314-bb1f-4630-b111-949ca3faf350" containerName="extract-utilities" Nov 29 08:12:52 crc kubenswrapper[4943]: E1129 08:12:52.769873 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1815d314-bb1f-4630-b111-949ca3faf350" containerName="registry-server" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.769878 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="1815d314-bb1f-4630-b111-949ca3faf350" containerName="registry-server" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.770039 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="1815d314-bb1f-4630-b111-949ca3faf350" containerName="registry-server" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.770060 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c0bfd16-93aa-4f3f-882a-6333420ed038" containerName="registry-server" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.771581 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.784781 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4dggt"] Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.896372 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-utilities\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.896495 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-catalog-content\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.896687 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dtzx\" (UniqueName: \"kubernetes.io/projected/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-kube-api-access-6dtzx\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.998976 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-utilities\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.999094 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-catalog-content\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.999255 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dtzx\" (UniqueName: \"kubernetes.io/projected/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-kube-api-access-6dtzx\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.999802 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-catalog-content\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:52 crc kubenswrapper[4943]: I1129 08:12:52.999881 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-utilities\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:53 crc kubenswrapper[4943]: I1129 08:12:53.029340 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6dtzx\" (UniqueName: \"kubernetes.io/projected/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-kube-api-access-6dtzx\") pod \"certified-operators-4dggt\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") " pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:53 crc kubenswrapper[4943]: I1129 08:12:53.091007 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4dggt" Nov 29 08:12:53 crc kubenswrapper[4943]: I1129 08:12:53.598766 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4dggt"] Nov 29 08:12:54 crc kubenswrapper[4943]: I1129 08:12:54.225331 4943 generic.go:334] "Generic (PLEG): container finished" podID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerID="2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e" exitCode=0 Nov 29 08:12:54 crc kubenswrapper[4943]: I1129 08:12:54.225428 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dggt" event={"ID":"9256db89-4298-4fa8-a6dc-0f7a6ca961b0","Type":"ContainerDied","Data":"2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e"} Nov 29 08:12:54 crc kubenswrapper[4943]: I1129 08:12:54.225667 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dggt" event={"ID":"9256db89-4298-4fa8-a6dc-0f7a6ca961b0","Type":"ContainerStarted","Data":"1e8cfc0270a69b2b688f54c2f6f1aa20392ca6606107bb7bf8fcf07a0db03eab"} Nov 29 08:12:55 crc kubenswrapper[4943]: I1129 08:12:55.236599 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dggt" event={"ID":"9256db89-4298-4fa8-a6dc-0f7a6ca961b0","Type":"ContainerStarted","Data":"1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d"} Nov 29 08:12:56 crc kubenswrapper[4943]: I1129 08:12:56.245931 4943 generic.go:334] "Generic (PLEG): container finished" podID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerID="1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d" exitCode=0 Nov 29 08:12:56 crc kubenswrapper[4943]: I1129 08:12:56.246262 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dggt" event={"ID":"9256db89-4298-4fa8-a6dc-0f7a6ca961b0","Type":"ContainerDied","Data":"1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d"} Nov 29 08:12:57 crc kubenswrapper[4943]: I1129 08:12:57.257152 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dggt" event={"ID":"9256db89-4298-4fa8-a6dc-0f7a6ca961b0","Type":"ContainerStarted","Data":"9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818"} Nov 29 08:12:57 crc kubenswrapper[4943]: I1129 08:12:57.285620 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4dggt" podStartSLOduration=2.759556861 podStartE2EDuration="5.285597818s" podCreationTimestamp="2025-11-29 08:12:52 +0000 UTC" firstStartedPulling="2025-11-29 08:12:54.227262513 +0000 UTC m=+5949.157351266" lastFinishedPulling="2025-11-29 08:12:56.75330347 +0000 UTC m=+5951.683392223" observedRunningTime="2025-11-29 08:12:57.278270042 +0000 UTC m=+5952.208358805" watchObservedRunningTime="2025-11-29 08:12:57.285597818 +0000 UTC m=+5952.215686591" Nov 29 08:13:02 crc kubenswrapper[4943]: I1129 08:13:02.612822 4943 patch_prober.go:28] interesting 
Nov 29 08:13:02 crc kubenswrapper[4943]: I1129 08:13:02.612822 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 08:13:02 crc kubenswrapper[4943]: I1129 08:13:02.613344 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 08:13:03 crc kubenswrapper[4943]: I1129 08:13:03.092140 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4dggt"
Nov 29 08:13:03 crc kubenswrapper[4943]: I1129 08:13:03.092211 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4dggt"
Nov 29 08:13:03 crc kubenswrapper[4943]: I1129 08:13:03.145246 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4dggt"
Nov 29 08:13:03 crc kubenswrapper[4943]: I1129 08:13:03.357030 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4dggt"
Nov 29 08:13:03 crc kubenswrapper[4943]: I1129 08:13:03.407402 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4dggt"]
Nov 29 08:13:05 crc kubenswrapper[4943]: I1129 08:13:05.481875 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4dggt" podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerName="registry-server" containerID="cri-o://9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818" gracePeriod=2
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.034003 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4dggt"
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.185249 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dtzx\" (UniqueName: \"kubernetes.io/projected/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-kube-api-access-6dtzx\") pod \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") "
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.185473 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-catalog-content\") pod \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") "
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.186343 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-utilities\") pod \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\" (UID: \"9256db89-4298-4fa8-a6dc-0f7a6ca961b0\") "
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.187078 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-utilities" (OuterVolumeSpecName: "utilities") pod "9256db89-4298-4fa8-a6dc-0f7a6ca961b0" (UID: "9256db89-4298-4fa8-a6dc-0f7a6ca961b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.187370 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.190937 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-kube-api-access-6dtzx" (OuterVolumeSpecName: "kube-api-access-6dtzx") pod "9256db89-4298-4fa8-a6dc-0f7a6ca961b0" (UID: "9256db89-4298-4fa8-a6dc-0f7a6ca961b0"). InnerVolumeSpecName "kube-api-access-6dtzx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.227518 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9256db89-4298-4fa8-a6dc-0f7a6ca961b0" (UID: "9256db89-4298-4fa8-a6dc-0f7a6ca961b0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.288946 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dtzx\" (UniqueName: \"kubernetes.io/projected/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-kube-api-access-6dtzx\") on node \"crc\" DevicePath \"\""
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.289255 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9256db89-4298-4fa8-a6dc-0f7a6ca961b0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.492076 4943 generic.go:334] "Generic (PLEG): container finished" podID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerID="9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818" exitCode=0
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.492131 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dggt" event={"ID":"9256db89-4298-4fa8-a6dc-0f7a6ca961b0","Type":"ContainerDied","Data":"9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818"}
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.492155 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4dggt"
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.492183 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dggt" event={"ID":"9256db89-4298-4fa8-a6dc-0f7a6ca961b0","Type":"ContainerDied","Data":"1e8cfc0270a69b2b688f54c2f6f1aa20392ca6606107bb7bf8fcf07a0db03eab"}
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.492200 4943 scope.go:117] "RemoveContainer" containerID="9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818"
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.512142 4943 scope.go:117] "RemoveContainer" containerID="1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d"
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.526360 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4dggt"]
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.537156 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4dggt"]
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.547374 4943 scope.go:117] "RemoveContainer" containerID="2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e"
Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.587115 4943 scope.go:117] "RemoveContainer" containerID="9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818"
Nov 29 08:13:06 crc kubenswrapper[4943]: E1129 08:13:06.587683 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818\": container with ID starting with 9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818 not found: ID does not exist" containerID="9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818"
\"9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818\": rpc error: code = NotFound desc = could not find container \"9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818\": container with ID starting with 9b202919ab7831e21afb1c4f2d206bfbd2556258e345990f8dad8d3bfe8c3818 not found: ID does not exist" Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.587972 4943 scope.go:117] "RemoveContainer" containerID="1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d" Nov 29 08:13:06 crc kubenswrapper[4943]: E1129 08:13:06.588384 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d\": container with ID starting with 1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d not found: ID does not exist" containerID="1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d" Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.588415 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d"} err="failed to get container status \"1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d\": rpc error: code = NotFound desc = could not find container \"1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d\": container with ID starting with 1c1fa6151f2c00c8ba7b9cece8c9da09bb14dc47f07dd8b2e9561afeafbcd49d not found: ID does not exist" Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.588437 4943 scope.go:117] "RemoveContainer" containerID="2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e" Nov 29 08:13:06 crc kubenswrapper[4943]: E1129 08:13:06.588730 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e\": container with ID starting with 2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e not found: ID does not exist" containerID="2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e" Nov 29 08:13:06 crc kubenswrapper[4943]: I1129 08:13:06.588839 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e"} err="failed to get container status \"2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e\": rpc error: code = NotFound desc = could not find container \"2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e\": container with ID starting with 2628005c027023c8f8f44185884640bd85af87a145a15628ae91eaaf7025689e not found: ID does not exist" Nov 29 08:13:07 crc kubenswrapper[4943]: I1129 08:13:07.338171 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" path="/var/lib/kubelet/pods/9256db89-4298-4fa8-a6dc-0f7a6ca961b0/volumes" Nov 29 08:13:32 crc kubenswrapper[4943]: I1129 08:13:32.614183 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:13:32 crc kubenswrapper[4943]: I1129 08:13:32.615241 4943 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:13:32 crc kubenswrapper[4943]: I1129 08:13:32.615319 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 08:13:32 crc kubenswrapper[4943]: I1129 08:13:32.616702 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 08:13:32 crc kubenswrapper[4943]: I1129 08:13:32.616772 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" gracePeriod=600 Nov 29 08:13:32 crc kubenswrapper[4943]: E1129 08:13:32.754490 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:13:33 crc kubenswrapper[4943]: I1129 08:13:33.721168 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" exitCode=0 Nov 29 08:13:33 crc kubenswrapper[4943]: I1129 08:13:33.721672 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252"} Nov 29 08:13:33 crc kubenswrapper[4943]: I1129 08:13:33.721708 4943 scope.go:117] "RemoveContainer" containerID="35be6d8bd158487e792a0e62a80719dd61d7b8ffe6b15cd2b509b50edc4d91e8" Nov 29 08:13:33 crc kubenswrapper[4943]: I1129 08:13:33.722404 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:13:33 crc kubenswrapper[4943]: E1129 08:13:33.722724 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.417595 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lt8qr"] Nov 29 08:13:35 crc kubenswrapper[4943]: E1129 08:13:35.418482 4943 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerName="registry-server" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.418503 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerName="registry-server" Nov 29 08:13:35 crc kubenswrapper[4943]: E1129 08:13:35.418540 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerName="extract-content" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.418550 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerName="extract-content" Nov 29 08:13:35 crc kubenswrapper[4943]: E1129 08:13:35.418600 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerName="extract-utilities" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.418612 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerName="extract-utilities" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.418905 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="9256db89-4298-4fa8-a6dc-0f7a6ca961b0" containerName="registry-server" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.420529 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.433012 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lt8qr"] Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.579320 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-catalog-content\") pod \"redhat-operators-lt8qr\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.579527 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-utilities\") pod \"redhat-operators-lt8qr\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.579587 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b974w\" (UniqueName: \"kubernetes.io/projected/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-kube-api-access-b974w\") pod \"redhat-operators-lt8qr\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.680947 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-utilities\") pod \"redhat-operators-lt8qr\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.681292 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b974w\" (UniqueName: \"kubernetes.io/projected/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-kube-api-access-b974w\") pod \"redhat-operators-lt8qr\" (UID: 
\"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.681507 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-catalog-content\") pod \"redhat-operators-lt8qr\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.681509 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-utilities\") pod \"redhat-operators-lt8qr\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.682011 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-catalog-content\") pod \"redhat-operators-lt8qr\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.706201 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b974w\" (UniqueName: \"kubernetes.io/projected/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-kube-api-access-b974w\") pod \"redhat-operators-lt8qr\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:35 crc kubenswrapper[4943]: I1129 08:13:35.742090 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:36 crc kubenswrapper[4943]: I1129 08:13:36.209790 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lt8qr"] Nov 29 08:13:36 crc kubenswrapper[4943]: I1129 08:13:36.752667 4943 generic.go:334] "Generic (PLEG): container finished" podID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerID="71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8" exitCode=0 Nov 29 08:13:36 crc kubenswrapper[4943]: I1129 08:13:36.752726 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lt8qr" event={"ID":"2704d1fd-21ee-49d5-a9a4-d8c8b841d338","Type":"ContainerDied","Data":"71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8"} Nov 29 08:13:36 crc kubenswrapper[4943]: I1129 08:13:36.752771 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lt8qr" event={"ID":"2704d1fd-21ee-49d5-a9a4-d8c8b841d338","Type":"ContainerStarted","Data":"6a7e06e4bbbe3c71f56fc77be7a079e5d3109d9116e420f979cf56116f00b055"} Nov 29 08:13:36 crc kubenswrapper[4943]: I1129 08:13:36.756833 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 08:13:37 crc kubenswrapper[4943]: I1129 08:13:37.763130 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lt8qr" event={"ID":"2704d1fd-21ee-49d5-a9a4-d8c8b841d338","Type":"ContainerStarted","Data":"8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f"} Nov 29 08:13:38 crc kubenswrapper[4943]: I1129 08:13:38.773631 4943 generic.go:334] "Generic (PLEG): container finished" podID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" 
containerID="8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f" exitCode=0 Nov 29 08:13:38 crc kubenswrapper[4943]: I1129 08:13:38.773668 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lt8qr" event={"ID":"2704d1fd-21ee-49d5-a9a4-d8c8b841d338","Type":"ContainerDied","Data":"8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f"} Nov 29 08:13:39 crc kubenswrapper[4943]: I1129 08:13:39.786031 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lt8qr" event={"ID":"2704d1fd-21ee-49d5-a9a4-d8c8b841d338","Type":"ContainerStarted","Data":"d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a"} Nov 29 08:13:39 crc kubenswrapper[4943]: I1129 08:13:39.802553 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lt8qr" podStartSLOduration=2.263086188 podStartE2EDuration="4.802532507s" podCreationTimestamp="2025-11-29 08:13:35 +0000 UTC" firstStartedPulling="2025-11-29 08:13:36.756589419 +0000 UTC m=+5991.686678172" lastFinishedPulling="2025-11-29 08:13:39.296035708 +0000 UTC m=+5994.226124491" observedRunningTime="2025-11-29 08:13:39.801296657 +0000 UTC m=+5994.731385420" watchObservedRunningTime="2025-11-29 08:13:39.802532507 +0000 UTC m=+5994.732621260" Nov 29 08:13:45 crc kubenswrapper[4943]: I1129 08:13:45.742933 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:45 crc kubenswrapper[4943]: I1129 08:13:45.744276 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:45 crc kubenswrapper[4943]: I1129 08:13:45.792047 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:45 crc kubenswrapper[4943]: I1129 08:13:45.900800 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:46 crc kubenswrapper[4943]: I1129 08:13:46.027472 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lt8qr"] Nov 29 08:13:46 crc kubenswrapper[4943]: I1129 08:13:46.328915 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:13:46 crc kubenswrapper[4943]: E1129 08:13:46.329156 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:13:47 crc kubenswrapper[4943]: I1129 08:13:47.868438 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lt8qr" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerName="registry-server" containerID="cri-o://d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a" gracePeriod=2 Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.424388 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.529332 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-catalog-content\") pod \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.529505 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-utilities\") pod \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.529542 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b974w\" (UniqueName: \"kubernetes.io/projected/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-kube-api-access-b974w\") pod \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\" (UID: \"2704d1fd-21ee-49d5-a9a4-d8c8b841d338\") " Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.530543 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-utilities" (OuterVolumeSpecName: "utilities") pod "2704d1fd-21ee-49d5-a9a4-d8c8b841d338" (UID: "2704d1fd-21ee-49d5-a9a4-d8c8b841d338"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.541126 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-kube-api-access-b974w" (OuterVolumeSpecName: "kube-api-access-b974w") pod "2704d1fd-21ee-49d5-a9a4-d8c8b841d338" (UID: "2704d1fd-21ee-49d5-a9a4-d8c8b841d338"). InnerVolumeSpecName "kube-api-access-b974w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.632362 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.632398 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b974w\" (UniqueName: \"kubernetes.io/projected/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-kube-api-access-b974w\") on node \"crc\" DevicePath \"\"" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.881432 4943 generic.go:334] "Generic (PLEG): container finished" podID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerID="d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a" exitCode=0 Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.881627 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lt8qr" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.881640 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lt8qr" event={"ID":"2704d1fd-21ee-49d5-a9a4-d8c8b841d338","Type":"ContainerDied","Data":"d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a"} Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.882866 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lt8qr" event={"ID":"2704d1fd-21ee-49d5-a9a4-d8c8b841d338","Type":"ContainerDied","Data":"6a7e06e4bbbe3c71f56fc77be7a079e5d3109d9116e420f979cf56116f00b055"} Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.882919 4943 scope.go:117] "RemoveContainer" containerID="d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.904480 4943 scope.go:117] "RemoveContainer" containerID="8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.929582 4943 scope.go:117] "RemoveContainer" containerID="71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.968590 4943 scope.go:117] "RemoveContainer" containerID="d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a" Nov 29 08:13:48 crc kubenswrapper[4943]: E1129 08:13:48.969125 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a\": container with ID starting with d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a not found: ID does not exist" containerID="d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.969163 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a"} err="failed to get container status \"d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a\": rpc error: code = NotFound desc = could not find container \"d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a\": container with ID starting with d86c66bc7290f2d54e5e193b03a2d5d18cc1c01f98e99a4f193965a9ff71661a not found: ID does not exist" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.969186 4943 scope.go:117] "RemoveContainer" containerID="8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f" Nov 29 08:13:48 crc kubenswrapper[4943]: E1129 08:13:48.969658 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f\": container with ID starting with 8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f not found: ID does not exist" containerID="8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.969707 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f"} err="failed to get container status \"8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f\": rpc error: code = NotFound desc = could not find container 
\"8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f\": container with ID starting with 8f147517657b06e45acb22b58bdb31fbeb789d140d8ea22c986cbfb0f634405f not found: ID does not exist" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.969737 4943 scope.go:117] "RemoveContainer" containerID="71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8" Nov 29 08:13:48 crc kubenswrapper[4943]: E1129 08:13:48.970099 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8\": container with ID starting with 71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8 not found: ID does not exist" containerID="71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8" Nov 29 08:13:48 crc kubenswrapper[4943]: I1129 08:13:48.970137 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8"} err="failed to get container status \"71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8\": rpc error: code = NotFound desc = could not find container \"71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8\": container with ID starting with 71db09a32a6edebfc5b5290eb649be4f28ccc4989740c366e6808771c4b213d8 not found: ID does not exist" Nov 29 08:13:50 crc kubenswrapper[4943]: I1129 08:13:50.137438 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2704d1fd-21ee-49d5-a9a4-d8c8b841d338" (UID: "2704d1fd-21ee-49d5-a9a4-d8c8b841d338"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:13:50 crc kubenswrapper[4943]: I1129 08:13:50.164753 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2704d1fd-21ee-49d5-a9a4-d8c8b841d338-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:13:50 crc kubenswrapper[4943]: I1129 08:13:50.417882 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lt8qr"] Nov 29 08:13:50 crc kubenswrapper[4943]: I1129 08:13:50.426967 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lt8qr"] Nov 29 08:13:51 crc kubenswrapper[4943]: I1129 08:13:51.338802 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" path="/var/lib/kubelet/pods/2704d1fd-21ee-49d5-a9a4-d8c8b841d338/volumes" Nov 29 08:13:57 crc kubenswrapper[4943]: I1129 08:13:57.328420 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:13:57 crc kubenswrapper[4943]: E1129 08:13:57.329380 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:14:09 crc kubenswrapper[4943]: I1129 08:14:09.327388 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:14:09 crc kubenswrapper[4943]: E1129 08:14:09.328391 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:14:23 crc kubenswrapper[4943]: I1129 08:14:23.328085 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:14:23 crc kubenswrapper[4943]: E1129 08:14:23.328879 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:14:38 crc kubenswrapper[4943]: I1129 08:14:38.328123 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:14:38 crc kubenswrapper[4943]: E1129 08:14:38.330249 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:14:53 crc kubenswrapper[4943]: I1129 08:14:53.327595 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:14:53 crc kubenswrapper[4943]: E1129 08:14:53.328342 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.142300 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4"] Nov 29 08:15:00 crc kubenswrapper[4943]: E1129 08:15:00.143579 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerName="registry-server" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.143598 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerName="registry-server" Nov 29 08:15:00 crc kubenswrapper[4943]: E1129 08:15:00.143610 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerName="extract-utilities" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.143620 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerName="extract-utilities" Nov 29 08:15:00 crc kubenswrapper[4943]: E1129 08:15:00.143634 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerName="extract-content" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.143640 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerName="extract-content" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.143879 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2704d1fd-21ee-49d5-a9a4-d8c8b841d338" containerName="registry-server" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.144738 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.149653 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.149833 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.158796 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4"] Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.230000 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm29j\" (UniqueName: \"kubernetes.io/projected/2f661cf5-78cf-442c-934c-1eb19af969a8-kube-api-access-bm29j\") pod \"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.230479 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f661cf5-78cf-442c-934c-1eb19af969a8-secret-volume\") pod \"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.230556 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f661cf5-78cf-442c-934c-1eb19af969a8-config-volume\") pod \"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.332189 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f661cf5-78cf-442c-934c-1eb19af969a8-secret-volume\") pod \"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.332261 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f661cf5-78cf-442c-934c-1eb19af969a8-config-volume\") pod \"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.332329 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm29j\" (UniqueName: \"kubernetes.io/projected/2f661cf5-78cf-442c-934c-1eb19af969a8-kube-api-access-bm29j\") pod \"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.334366 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f661cf5-78cf-442c-934c-1eb19af969a8-config-volume\") pod 
\"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.338946 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f661cf5-78cf-442c-934c-1eb19af969a8-secret-volume\") pod \"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.359782 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm29j\" (UniqueName: \"kubernetes.io/projected/2f661cf5-78cf-442c-934c-1eb19af969a8-kube-api-access-bm29j\") pod \"collect-profiles-29406735-7dbb4\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.468615 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:00 crc kubenswrapper[4943]: I1129 08:15:00.926447 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4"] Nov 29 08:15:01 crc kubenswrapper[4943]: I1129 08:15:01.521219 4943 generic.go:334] "Generic (PLEG): container finished" podID="2f661cf5-78cf-442c-934c-1eb19af969a8" containerID="ab6e5a25da96fcb7f284b9964d23a34e3c582affaa4d220f5cac928c9f03d81a" exitCode=0 Nov 29 08:15:01 crc kubenswrapper[4943]: I1129 08:15:01.521320 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" event={"ID":"2f661cf5-78cf-442c-934c-1eb19af969a8","Type":"ContainerDied","Data":"ab6e5a25da96fcb7f284b9964d23a34e3c582affaa4d220f5cac928c9f03d81a"} Nov 29 08:15:01 crc kubenswrapper[4943]: I1129 08:15:01.521515 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" event={"ID":"2f661cf5-78cf-442c-934c-1eb19af969a8","Type":"ContainerStarted","Data":"9f7e441c1f0a0e2e237f70c8bf16022cd7d99ac07039121cc60e5ecec58699b0"} Nov 29 08:15:02 crc kubenswrapper[4943]: I1129 08:15:02.993727 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.086282 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f661cf5-78cf-442c-934c-1eb19af969a8-secret-volume\") pod \"2f661cf5-78cf-442c-934c-1eb19af969a8\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.086745 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f661cf5-78cf-442c-934c-1eb19af969a8-config-volume\") pod \"2f661cf5-78cf-442c-934c-1eb19af969a8\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.086895 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bm29j\" (UniqueName: \"kubernetes.io/projected/2f661cf5-78cf-442c-934c-1eb19af969a8-kube-api-access-bm29j\") pod \"2f661cf5-78cf-442c-934c-1eb19af969a8\" (UID: \"2f661cf5-78cf-442c-934c-1eb19af969a8\") " Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.091437 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f661cf5-78cf-442c-934c-1eb19af969a8-config-volume" (OuterVolumeSpecName: "config-volume") pod "2f661cf5-78cf-442c-934c-1eb19af969a8" (UID: "2f661cf5-78cf-442c-934c-1eb19af969a8"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.097589 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f661cf5-78cf-442c-934c-1eb19af969a8-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2f661cf5-78cf-442c-934c-1eb19af969a8" (UID: "2f661cf5-78cf-442c-934c-1eb19af969a8"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.119253 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f661cf5-78cf-442c-934c-1eb19af969a8-kube-api-access-bm29j" (OuterVolumeSpecName: "kube-api-access-bm29j") pod "2f661cf5-78cf-442c-934c-1eb19af969a8" (UID: "2f661cf5-78cf-442c-934c-1eb19af969a8"). InnerVolumeSpecName "kube-api-access-bm29j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.198628 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bm29j\" (UniqueName: \"kubernetes.io/projected/2f661cf5-78cf-442c-934c-1eb19af969a8-kube-api-access-bm29j\") on node \"crc\" DevicePath \"\"" Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.198670 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f661cf5-78cf-442c-934c-1eb19af969a8-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 08:15:03 crc kubenswrapper[4943]: I1129 08:15:03.198683 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f661cf5-78cf-442c-934c-1eb19af969a8-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 08:15:04 crc kubenswrapper[4943]: I1129 08:15:04.083001 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" event={"ID":"2f661cf5-78cf-442c-934c-1eb19af969a8","Type":"ContainerDied","Data":"9f7e441c1f0a0e2e237f70c8bf16022cd7d99ac07039121cc60e5ecec58699b0"} Nov 29 08:15:04 crc kubenswrapper[4943]: I1129 08:15:04.083383 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f7e441c1f0a0e2e237f70c8bf16022cd7d99ac07039121cc60e5ecec58699b0" Nov 29 08:15:04 crc kubenswrapper[4943]: I1129 08:15:04.083480 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406735-7dbb4" Nov 29 08:15:04 crc kubenswrapper[4943]: I1129 08:15:04.119171 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq"] Nov 29 08:15:04 crc kubenswrapper[4943]: I1129 08:15:04.127759 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406690-vvqfq"] Nov 29 08:15:05 crc kubenswrapper[4943]: I1129 08:15:05.341610 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e55dbca3-5f0d-4f4c-b518-6258340a394f" path="/var/lib/kubelet/pods/e55dbca3-5f0d-4f4c-b518-6258340a394f/volumes" Nov 29 08:15:07 crc kubenswrapper[4943]: I1129 08:15:07.329919 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:15:07 crc kubenswrapper[4943]: E1129 08:15:07.331074 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:15:20 crc kubenswrapper[4943]: I1129 08:15:20.328711 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:15:20 crc kubenswrapper[4943]: E1129 08:15:20.330045 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:15:34 crc kubenswrapper[4943]: I1129 08:15:34.328504 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:15:34 crc kubenswrapper[4943]: E1129 08:15:34.329224 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:15:49 crc kubenswrapper[4943]: I1129 08:15:49.328515 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:15:49 crc kubenswrapper[4943]: E1129 08:15:49.329424 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:15:56 crc kubenswrapper[4943]: I1129 08:15:56.310192 4943 scope.go:117] "RemoveContainer" containerID="cc8152106d74ae4f4964fb1f30e1eb99fa9842944c26001caf3cbad68075f41b" Nov 29 08:16:03 crc kubenswrapper[4943]: I1129 08:16:03.402102 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:16:03 crc kubenswrapper[4943]: E1129 08:16:03.402781 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:16:18 crc kubenswrapper[4943]: I1129 08:16:18.327517 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:16:18 crc kubenswrapper[4943]: E1129 08:16:18.328316 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:16:32 crc kubenswrapper[4943]: I1129 08:16:32.327609 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:16:32 crc kubenswrapper[4943]: E1129 08:16:32.329399 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:16:44 crc kubenswrapper[4943]: I1129 08:16:44.327078 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:16:44 crc kubenswrapper[4943]: E1129 08:16:44.328147 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:16:56 crc kubenswrapper[4943]: I1129 08:16:56.328188 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:16:56 crc kubenswrapper[4943]: E1129 08:16:56.329265 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:17:09 crc kubenswrapper[4943]: I1129 08:17:09.328177 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:17:09 crc kubenswrapper[4943]: E1129 08:17:09.329338 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:17:24 crc kubenswrapper[4943]: I1129 08:17:24.327399 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:17:24 crc kubenswrapper[4943]: E1129 08:17:24.328152 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:17:37 crc kubenswrapper[4943]: I1129 08:17:37.327391 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:17:37 crc kubenswrapper[4943]: E1129 08:17:37.328340 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:17:48 crc kubenswrapper[4943]: I1129 08:17:48.327136 4943 
scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:17:48 crc kubenswrapper[4943]: E1129 08:17:48.327973 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:18:01 crc kubenswrapper[4943]: I1129 08:18:01.327282 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:18:01 crc kubenswrapper[4943]: E1129 08:18:01.328139 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:18:15 crc kubenswrapper[4943]: I1129 08:18:15.334475 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:18:15 crc kubenswrapper[4943]: E1129 08:18:15.335317 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:18:19 crc kubenswrapper[4943]: I1129 08:18:19.660941 4943 patch_prober.go:28] interesting pod/route-controller-manager-669b76479-bmmxl container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 08:18:19 crc kubenswrapper[4943]: I1129 08:18:19.660975 4943 patch_prober.go:28] interesting pod/route-controller-manager-669b76479-bmmxl container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 08:18:19 crc kubenswrapper[4943]: I1129 08:18:19.661910 4943 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" podUID="c15212b0-d347-4dfa-9751-b6bc15a5b377" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 08:18:19 crc kubenswrapper[4943]: I1129 08:18:19.661985 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-669b76479-bmmxl" podUID="c15212b0-d347-4dfa-9751-b6bc15a5b377" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": net/http: 
request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 08:18:30 crc kubenswrapper[4943]: I1129 08:18:30.328453 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:18:30 crc kubenswrapper[4943]: E1129 08:18:30.329187 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:18:42 crc kubenswrapper[4943]: I1129 08:18:42.327585 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:18:43 crc kubenswrapper[4943]: I1129 08:18:43.018420 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"d37250ae681cba879967783a99876c5526a6a4dbf946dee72f1c200f987fa32a"} Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.012929 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wrzc7"] Nov 29 08:19:32 crc kubenswrapper[4943]: E1129 08:19:32.016895 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f661cf5-78cf-442c-934c-1eb19af969a8" containerName="collect-profiles" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.016929 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f661cf5-78cf-442c-934c-1eb19af969a8" containerName="collect-profiles" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.017114 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f661cf5-78cf-442c-934c-1eb19af969a8" containerName="collect-profiles" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.018891 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.030879 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wrzc7"] Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.112173 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-utilities\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.112325 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glpm2\" (UniqueName: \"kubernetes.io/projected/089edf9d-6775-4ef2-a379-86efd62e0c47-kube-api-access-glpm2\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.112420 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-catalog-content\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.214028 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-utilities\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.214196 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glpm2\" (UniqueName: \"kubernetes.io/projected/089edf9d-6775-4ef2-a379-86efd62e0c47-kube-api-access-glpm2\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.214403 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-catalog-content\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.214756 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-utilities\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.214782 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-catalog-content\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.232461 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-glpm2\" (UniqueName: \"kubernetes.io/projected/089edf9d-6775-4ef2-a379-86efd62e0c47-kube-api-access-glpm2\") pod \"redhat-marketplace-wrzc7\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.336582 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:32 crc kubenswrapper[4943]: I1129 08:19:32.812783 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wrzc7"] Nov 29 08:19:33 crc kubenswrapper[4943]: I1129 08:19:33.504369 4943 generic.go:334] "Generic (PLEG): container finished" podID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerID="09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559" exitCode=0 Nov 29 08:19:33 crc kubenswrapper[4943]: I1129 08:19:33.504526 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wrzc7" event={"ID":"089edf9d-6775-4ef2-a379-86efd62e0c47","Type":"ContainerDied","Data":"09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559"} Nov 29 08:19:33 crc kubenswrapper[4943]: I1129 08:19:33.504673 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wrzc7" event={"ID":"089edf9d-6775-4ef2-a379-86efd62e0c47","Type":"ContainerStarted","Data":"5802da826b798bee217f9d091a91617b6efb76d17cc0a68bebc80f1fe8f86bd4"} Nov 29 08:19:33 crc kubenswrapper[4943]: I1129 08:19:33.506937 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 08:19:34 crc kubenswrapper[4943]: I1129 08:19:34.515191 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wrzc7" event={"ID":"089edf9d-6775-4ef2-a379-86efd62e0c47","Type":"ContainerStarted","Data":"2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b"} Nov 29 08:19:35 crc kubenswrapper[4943]: I1129 08:19:35.528235 4943 generic.go:334] "Generic (PLEG): container finished" podID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerID="2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b" exitCode=0 Nov 29 08:19:35 crc kubenswrapper[4943]: I1129 08:19:35.528287 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wrzc7" event={"ID":"089edf9d-6775-4ef2-a379-86efd62e0c47","Type":"ContainerDied","Data":"2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b"} Nov 29 08:19:36 crc kubenswrapper[4943]: I1129 08:19:36.537296 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wrzc7" event={"ID":"089edf9d-6775-4ef2-a379-86efd62e0c47","Type":"ContainerStarted","Data":"e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f"} Nov 29 08:19:36 crc kubenswrapper[4943]: I1129 08:19:36.555312 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wrzc7" podStartSLOduration=3.159289313 podStartE2EDuration="5.555289465s" podCreationTimestamp="2025-11-29 08:19:31 +0000 UTC" firstStartedPulling="2025-11-29 08:19:33.506646194 +0000 UTC m=+6348.436734947" lastFinishedPulling="2025-11-29 08:19:35.902646346 +0000 UTC m=+6350.832735099" observedRunningTime="2025-11-29 08:19:36.553957123 +0000 UTC m=+6351.484045886" watchObservedRunningTime="2025-11-29 08:19:36.555289465 +0000 UTC 
m=+6351.485378238" Nov 29 08:19:42 crc kubenswrapper[4943]: I1129 08:19:42.337532 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:42 crc kubenswrapper[4943]: I1129 08:19:42.338125 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:42 crc kubenswrapper[4943]: I1129 08:19:42.381274 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:42 crc kubenswrapper[4943]: I1129 08:19:42.651006 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:42 crc kubenswrapper[4943]: I1129 08:19:42.730821 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wrzc7"] Nov 29 08:19:44 crc kubenswrapper[4943]: I1129 08:19:44.624452 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wrzc7" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerName="registry-server" containerID="cri-o://e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f" gracePeriod=2 Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.101919 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.280539 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-utilities\") pod \"089edf9d-6775-4ef2-a379-86efd62e0c47\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.280620 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glpm2\" (UniqueName: \"kubernetes.io/projected/089edf9d-6775-4ef2-a379-86efd62e0c47-kube-api-access-glpm2\") pod \"089edf9d-6775-4ef2-a379-86efd62e0c47\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.280787 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-catalog-content\") pod \"089edf9d-6775-4ef2-a379-86efd62e0c47\" (UID: \"089edf9d-6775-4ef2-a379-86efd62e0c47\") " Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.281666 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-utilities" (OuterVolumeSpecName: "utilities") pod "089edf9d-6775-4ef2-a379-86efd62e0c47" (UID: "089edf9d-6775-4ef2-a379-86efd62e0c47"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.282023 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.294502 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/089edf9d-6775-4ef2-a379-86efd62e0c47-kube-api-access-glpm2" (OuterVolumeSpecName: "kube-api-access-glpm2") pod "089edf9d-6775-4ef2-a379-86efd62e0c47" (UID: "089edf9d-6775-4ef2-a379-86efd62e0c47"). InnerVolumeSpecName "kube-api-access-glpm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.301259 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "089edf9d-6775-4ef2-a379-86efd62e0c47" (UID: "089edf9d-6775-4ef2-a379-86efd62e0c47"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.384877 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/089edf9d-6775-4ef2-a379-86efd62e0c47-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.384921 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glpm2\" (UniqueName: \"kubernetes.io/projected/089edf9d-6775-4ef2-a379-86efd62e0c47-kube-api-access-glpm2\") on node \"crc\" DevicePath \"\"" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.642357 4943 generic.go:334] "Generic (PLEG): container finished" podID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerID="e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f" exitCode=0 Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.642414 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wrzc7" event={"ID":"089edf9d-6775-4ef2-a379-86efd62e0c47","Type":"ContainerDied","Data":"e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f"} Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.642447 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wrzc7" event={"ID":"089edf9d-6775-4ef2-a379-86efd62e0c47","Type":"ContainerDied","Data":"5802da826b798bee217f9d091a91617b6efb76d17cc0a68bebc80f1fe8f86bd4"} Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.642469 4943 scope.go:117] "RemoveContainer" containerID="e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.642544 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wrzc7" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.669035 4943 scope.go:117] "RemoveContainer" containerID="2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.670499 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wrzc7"] Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.680607 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wrzc7"] Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.695056 4943 scope.go:117] "RemoveContainer" containerID="09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.746174 4943 scope.go:117] "RemoveContainer" containerID="e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f" Nov 29 08:19:45 crc kubenswrapper[4943]: E1129 08:19:45.746974 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f\": container with ID starting with e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f not found: ID does not exist" containerID="e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.747219 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f"} err="failed to get container status \"e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f\": rpc error: code = NotFound desc = could not find container \"e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f\": container with ID starting with e1218934a31aa2989652276a63f1f5683960d5ea3f12fcb104d0d27cc4e0cc9f not found: ID does not exist" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.747257 4943 scope.go:117] "RemoveContainer" containerID="2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b" Nov 29 08:19:45 crc kubenswrapper[4943]: E1129 08:19:45.748183 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b\": container with ID starting with 2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b not found: ID does not exist" containerID="2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.748232 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b"} err="failed to get container status \"2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b\": rpc error: code = NotFound desc = could not find container \"2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b\": container with ID starting with 2ddb529b2fd48ebd029c0ffd57f978bc179211e14054b6e486d649751357634b not found: ID does not exist" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.748253 4943 scope.go:117] "RemoveContainer" containerID="09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559" Nov 29 08:19:45 crc kubenswrapper[4943]: E1129 08:19:45.751741 4943 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559\": container with ID starting with 09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559 not found: ID does not exist" containerID="09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559" Nov 29 08:19:45 crc kubenswrapper[4943]: I1129 08:19:45.751805 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559"} err="failed to get container status \"09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559\": rpc error: code = NotFound desc = could not find container \"09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559\": container with ID starting with 09b1d754a8c7a3c36247e1106745d617a2cae3ba09d1ed19aee0a2b26134e559 not found: ID does not exist" Nov 29 08:19:47 crc kubenswrapper[4943]: I1129 08:19:47.341353 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" path="/var/lib/kubelet/pods/089edf9d-6775-4ef2-a379-86efd62e0c47/volumes" Nov 29 08:19:49 crc kubenswrapper[4943]: E1129 08:19:49.360419 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice/crio-5802da826b798bee217f9d091a91617b6efb76d17cc0a68bebc80f1fe8f86bd4\": RecentStats: unable to find data in memory cache]" Nov 29 08:19:59 crc kubenswrapper[4943]: E1129 08:19:59.593348 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice/crio-5802da826b798bee217f9d091a91617b6efb76d17cc0a68bebc80f1fe8f86bd4\": RecentStats: unable to find data in memory cache]" Nov 29 08:20:09 crc kubenswrapper[4943]: E1129 08:20:09.848290 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice/crio-5802da826b798bee217f9d091a91617b6efb76d17cc0a68bebc80f1fe8f86bd4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice\": RecentStats: unable to find data in memory cache]" Nov 29 08:20:20 crc kubenswrapper[4943]: E1129 08:20:20.072985 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice/crio-5802da826b798bee217f9d091a91617b6efb76d17cc0a68bebc80f1fe8f86bd4\": RecentStats: unable to find data in memory cache]" Nov 29 08:20:30 crc kubenswrapper[4943]: E1129 08:20:30.349182 4943 
cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice/crio-5802da826b798bee217f9d091a91617b6efb76d17cc0a68bebc80f1fe8f86bd4\": RecentStats: unable to find data in memory cache]" Nov 29 08:20:40 crc kubenswrapper[4943]: E1129 08:20:40.617843 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod089edf9d_6775_4ef2_a379_86efd62e0c47.slice/crio-5802da826b798bee217f9d091a91617b6efb76d17cc0a68bebc80f1fe8f86bd4\": RecentStats: unable to find data in memory cache]" Nov 29 08:21:02 crc kubenswrapper[4943]: I1129 08:21:02.613041 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:21:02 crc kubenswrapper[4943]: I1129 08:21:02.613657 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:21:32 crc kubenswrapper[4943]: I1129 08:21:32.613190 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:21:32 crc kubenswrapper[4943]: I1129 08:21:32.613693 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.452817 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fph5s"] Nov 29 08:21:45 crc kubenswrapper[4943]: E1129 08:21:45.454959 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerName="extract-utilities" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.455269 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerName="extract-utilities" Nov 29 08:21:45 crc kubenswrapper[4943]: E1129 08:21:45.455366 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerName="registry-server" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.455451 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerName="registry-server" Nov 29 08:21:45 crc 
kubenswrapper[4943]: E1129 08:21:45.455613 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerName="extract-content" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.455700 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerName="extract-content" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.455987 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="089edf9d-6775-4ef2-a379-86efd62e0c47" containerName="registry-server" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.458087 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.465422 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fph5s"] Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.583726 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-utilities\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.583981 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lcw2\" (UniqueName: \"kubernetes.io/projected/41077a7f-3382-4f81-89a8-88436d8bcf3e-kube-api-access-9lcw2\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.584175 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-catalog-content\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.686651 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-utilities\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.686782 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lcw2\" (UniqueName: \"kubernetes.io/projected/41077a7f-3382-4f81-89a8-88436d8bcf3e-kube-api-access-9lcw2\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.686880 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-catalog-content\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.687116 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-utilities\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.687148 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-catalog-content\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.704581 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lcw2\" (UniqueName: \"kubernetes.io/projected/41077a7f-3382-4f81-89a8-88436d8bcf3e-kube-api-access-9lcw2\") pod \"community-operators-fph5s\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:45 crc kubenswrapper[4943]: I1129 08:21:45.794353 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:46 crc kubenswrapper[4943]: I1129 08:21:46.385456 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fph5s"] Nov 29 08:21:46 crc kubenswrapper[4943]: I1129 08:21:46.692358 4943 generic.go:334] "Generic (PLEG): container finished" podID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerID="a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2" exitCode=0 Nov 29 08:21:46 crc kubenswrapper[4943]: I1129 08:21:46.692400 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fph5s" event={"ID":"41077a7f-3382-4f81-89a8-88436d8bcf3e","Type":"ContainerDied","Data":"a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2"} Nov 29 08:21:46 crc kubenswrapper[4943]: I1129 08:21:46.692425 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fph5s" event={"ID":"41077a7f-3382-4f81-89a8-88436d8bcf3e","Type":"ContainerStarted","Data":"5c4fc9ce7b706eeeb6fcfb066400fceae328f7985b9d40463cc021c70a9ae891"} Nov 29 08:21:47 crc kubenswrapper[4943]: I1129 08:21:47.703130 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fph5s" event={"ID":"41077a7f-3382-4f81-89a8-88436d8bcf3e","Type":"ContainerStarted","Data":"5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37"} Nov 29 08:21:48 crc kubenswrapper[4943]: I1129 08:21:48.713379 4943 generic.go:334] "Generic (PLEG): container finished" podID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerID="5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37" exitCode=0 Nov 29 08:21:48 crc kubenswrapper[4943]: I1129 08:21:48.713430 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fph5s" event={"ID":"41077a7f-3382-4f81-89a8-88436d8bcf3e","Type":"ContainerDied","Data":"5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37"} Nov 29 08:21:50 crc kubenswrapper[4943]: I1129 08:21:50.734102 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fph5s" event={"ID":"41077a7f-3382-4f81-89a8-88436d8bcf3e","Type":"ContainerStarted","Data":"ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731"} Nov 29 08:21:50 crc kubenswrapper[4943]: I1129 
08:21:50.759307 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fph5s" podStartSLOduration=2.444090913 podStartE2EDuration="5.759286777s" podCreationTimestamp="2025-11-29 08:21:45 +0000 UTC" firstStartedPulling="2025-11-29 08:21:46.694106755 +0000 UTC m=+6481.624195508" lastFinishedPulling="2025-11-29 08:21:50.009302619 +0000 UTC m=+6484.939391372" observedRunningTime="2025-11-29 08:21:50.754302087 +0000 UTC m=+6485.684390850" watchObservedRunningTime="2025-11-29 08:21:50.759286777 +0000 UTC m=+6485.689375540" Nov 29 08:21:55 crc kubenswrapper[4943]: I1129 08:21:55.795190 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:55 crc kubenswrapper[4943]: I1129 08:21:55.795927 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:55 crc kubenswrapper[4943]: I1129 08:21:55.858094 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:56 crc kubenswrapper[4943]: I1129 08:21:56.836798 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:56 crc kubenswrapper[4943]: I1129 08:21:56.887415 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fph5s"] Nov 29 08:21:58 crc kubenswrapper[4943]: I1129 08:21:58.800784 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fph5s" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerName="registry-server" containerID="cri-o://ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731" gracePeriod=2 Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.297750 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.360865 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-utilities\") pod \"41077a7f-3382-4f81-89a8-88436d8bcf3e\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.361003 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-catalog-content\") pod \"41077a7f-3382-4f81-89a8-88436d8bcf3e\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.361048 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lcw2\" (UniqueName: \"kubernetes.io/projected/41077a7f-3382-4f81-89a8-88436d8bcf3e-kube-api-access-9lcw2\") pod \"41077a7f-3382-4f81-89a8-88436d8bcf3e\" (UID: \"41077a7f-3382-4f81-89a8-88436d8bcf3e\") " Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.361993 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-utilities" (OuterVolumeSpecName: "utilities") pod "41077a7f-3382-4f81-89a8-88436d8bcf3e" (UID: "41077a7f-3382-4f81-89a8-88436d8bcf3e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.369042 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41077a7f-3382-4f81-89a8-88436d8bcf3e-kube-api-access-9lcw2" (OuterVolumeSpecName: "kube-api-access-9lcw2") pod "41077a7f-3382-4f81-89a8-88436d8bcf3e" (UID: "41077a7f-3382-4f81-89a8-88436d8bcf3e"). InnerVolumeSpecName "kube-api-access-9lcw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.416059 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41077a7f-3382-4f81-89a8-88436d8bcf3e" (UID: "41077a7f-3382-4f81-89a8-88436d8bcf3e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.463458 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.463490 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41077a7f-3382-4f81-89a8-88436d8bcf3e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.463499 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lcw2\" (UniqueName: \"kubernetes.io/projected/41077a7f-3382-4f81-89a8-88436d8bcf3e-kube-api-access-9lcw2\") on node \"crc\" DevicePath \"\"" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.820994 4943 generic.go:334] "Generic (PLEG): container finished" podID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerID="ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731" exitCode=0 Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.821050 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fph5s" event={"ID":"41077a7f-3382-4f81-89a8-88436d8bcf3e","Type":"ContainerDied","Data":"ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731"} Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.821093 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fph5s" event={"ID":"41077a7f-3382-4f81-89a8-88436d8bcf3e","Type":"ContainerDied","Data":"5c4fc9ce7b706eeeb6fcfb066400fceae328f7985b9d40463cc021c70a9ae891"} Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.821105 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fph5s" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.821127 4943 scope.go:117] "RemoveContainer" containerID="ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.848495 4943 scope.go:117] "RemoveContainer" containerID="5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.886221 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fph5s"] Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.889027 4943 scope.go:117] "RemoveContainer" containerID="a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.897897 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fph5s"] Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.915752 4943 scope.go:117] "RemoveContainer" containerID="ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731" Nov 29 08:21:59 crc kubenswrapper[4943]: E1129 08:21:59.916212 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731\": container with ID starting with ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731 not found: ID does not exist" containerID="ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.916252 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731"} err="failed to get container status \"ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731\": rpc error: code = NotFound desc = could not find container \"ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731\": container with ID starting with ca735e97370a0b7ec5ff0a70cd3f7c335581e62e6f3baa2704159e80043a0731 not found: ID does not exist" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.916280 4943 scope.go:117] "RemoveContainer" containerID="5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37" Nov 29 08:21:59 crc kubenswrapper[4943]: E1129 08:21:59.916622 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37\": container with ID starting with 5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37 not found: ID does not exist" containerID="5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.916647 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37"} err="failed to get container status \"5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37\": rpc error: code = NotFound desc = could not find container \"5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37\": container with ID starting with 5ace50f4b5f0a57ccdbf821ca697de14de46309075e87a4740a0313bd6b7de37 not found: ID does not exist" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.916664 4943 scope.go:117] "RemoveContainer" 
containerID="a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2" Nov 29 08:21:59 crc kubenswrapper[4943]: E1129 08:21:59.916868 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2\": container with ID starting with a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2 not found: ID does not exist" containerID="a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2" Nov 29 08:21:59 crc kubenswrapper[4943]: I1129 08:21:59.916893 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2"} err="failed to get container status \"a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2\": rpc error: code = NotFound desc = could not find container \"a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2\": container with ID starting with a1feb25405384799adf5d75d73b257d09389816b84ff503837776c56d77b32a2 not found: ID does not exist" Nov 29 08:22:01 crc kubenswrapper[4943]: I1129 08:22:01.342284 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" path="/var/lib/kubelet/pods/41077a7f-3382-4f81-89a8-88436d8bcf3e/volumes" Nov 29 08:22:02 crc kubenswrapper[4943]: I1129 08:22:02.612773 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:22:02 crc kubenswrapper[4943]: I1129 08:22:02.613292 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:22:02 crc kubenswrapper[4943]: I1129 08:22:02.613340 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 08:22:02 crc kubenswrapper[4943]: I1129 08:22:02.614086 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d37250ae681cba879967783a99876c5526a6a4dbf946dee72f1c200f987fa32a"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 08:22:02 crc kubenswrapper[4943]: I1129 08:22:02.614155 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://d37250ae681cba879967783a99876c5526a6a4dbf946dee72f1c200f987fa32a" gracePeriod=600 Nov 29 08:22:02 crc kubenswrapper[4943]: I1129 08:22:02.852423 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="d37250ae681cba879967783a99876c5526a6a4dbf946dee72f1c200f987fa32a" exitCode=0 Nov 29 08:22:02 crc kubenswrapper[4943]: I1129 08:22:02.852805 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"d37250ae681cba879967783a99876c5526a6a4dbf946dee72f1c200f987fa32a"} Nov 29 08:22:02 crc kubenswrapper[4943]: I1129 08:22:02.852843 4943 scope.go:117] "RemoveContainer" containerID="9a85a80011dd3f8dfcde5ff799b8ba86c3aee14ae9a4b74b337a6b6f12326252" Nov 29 08:22:03 crc kubenswrapper[4943]: I1129 08:22:03.864721 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a"} Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.059536 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w7nt9"] Nov 29 08:23:10 crc kubenswrapper[4943]: E1129 08:23:10.060495 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerName="registry-server" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.060508 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerName="registry-server" Nov 29 08:23:10 crc kubenswrapper[4943]: E1129 08:23:10.060528 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerName="extract-utilities" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.060534 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerName="extract-utilities" Nov 29 08:23:10 crc kubenswrapper[4943]: E1129 08:23:10.060544 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerName="extract-content" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.060550 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerName="extract-content" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.060970 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="41077a7f-3382-4f81-89a8-88436d8bcf3e" containerName="registry-server" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.062356 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.081948 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w7nt9"] Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.162264 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv7cr\" (UniqueName: \"kubernetes.io/projected/492fcdac-bd79-4e38-b445-f3698c7fdd6a-kube-api-access-sv7cr\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.162485 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-utilities\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.162594 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-catalog-content\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.264248 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-utilities\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.264330 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-catalog-content\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.264386 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv7cr\" (UniqueName: \"kubernetes.io/projected/492fcdac-bd79-4e38-b445-f3698c7fdd6a-kube-api-access-sv7cr\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.265139 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-utilities\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.265363 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-catalog-content\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.299330 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-sv7cr\" (UniqueName: \"kubernetes.io/projected/492fcdac-bd79-4e38-b445-f3698c7fdd6a-kube-api-access-sv7cr\") pod \"certified-operators-w7nt9\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.382988 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:10 crc kubenswrapper[4943]: I1129 08:23:10.824682 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w7nt9"] Nov 29 08:23:11 crc kubenswrapper[4943]: I1129 08:23:11.457884 4943 generic.go:334] "Generic (PLEG): container finished" podID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerID="0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477" exitCode=0 Nov 29 08:23:11 crc kubenswrapper[4943]: I1129 08:23:11.457967 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7nt9" event={"ID":"492fcdac-bd79-4e38-b445-f3698c7fdd6a","Type":"ContainerDied","Data":"0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477"} Nov 29 08:23:11 crc kubenswrapper[4943]: I1129 08:23:11.458221 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7nt9" event={"ID":"492fcdac-bd79-4e38-b445-f3698c7fdd6a","Type":"ContainerStarted","Data":"8b9e6e5e0a329f869772bd79f3ba2075eecde4de323d9df1da99a170a55f5ac5"} Nov 29 08:23:13 crc kubenswrapper[4943]: I1129 08:23:13.478491 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7nt9" event={"ID":"492fcdac-bd79-4e38-b445-f3698c7fdd6a","Type":"ContainerStarted","Data":"c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0"} Nov 29 08:23:14 crc kubenswrapper[4943]: I1129 08:23:14.489800 4943 generic.go:334] "Generic (PLEG): container finished" podID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerID="c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0" exitCode=0 Nov 29 08:23:14 crc kubenswrapper[4943]: I1129 08:23:14.489876 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7nt9" event={"ID":"492fcdac-bd79-4e38-b445-f3698c7fdd6a","Type":"ContainerDied","Data":"c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0"} Nov 29 08:23:15 crc kubenswrapper[4943]: I1129 08:23:15.500966 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7nt9" event={"ID":"492fcdac-bd79-4e38-b445-f3698c7fdd6a","Type":"ContainerStarted","Data":"ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db"} Nov 29 08:23:15 crc kubenswrapper[4943]: I1129 08:23:15.521987 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w7nt9" podStartSLOduration=2.066303452 podStartE2EDuration="5.521947861s" podCreationTimestamp="2025-11-29 08:23:10 +0000 UTC" firstStartedPulling="2025-11-29 08:23:11.4593116 +0000 UTC m=+6566.389400353" lastFinishedPulling="2025-11-29 08:23:14.914956009 +0000 UTC m=+6569.845044762" observedRunningTime="2025-11-29 08:23:15.519448822 +0000 UTC m=+6570.449537575" watchObservedRunningTime="2025-11-29 08:23:15.521947861 +0000 UTC m=+6570.452036624" Nov 29 08:23:20 crc kubenswrapper[4943]: I1129 08:23:20.383697 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:20 crc kubenswrapper[4943]: I1129 08:23:20.384300 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:20 crc kubenswrapper[4943]: I1129 08:23:20.449783 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:20 crc kubenswrapper[4943]: I1129 08:23:20.606609 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:20 crc kubenswrapper[4943]: I1129 08:23:20.690728 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w7nt9"] Nov 29 08:23:22 crc kubenswrapper[4943]: I1129 08:23:22.563285 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w7nt9" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerName="registry-server" containerID="cri-o://ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db" gracePeriod=2 Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.042725 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.127190 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sv7cr\" (UniqueName: \"kubernetes.io/projected/492fcdac-bd79-4e38-b445-f3698c7fdd6a-kube-api-access-sv7cr\") pod \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.127372 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-catalog-content\") pod \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.127525 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-utilities\") pod \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\" (UID: \"492fcdac-bd79-4e38-b445-f3698c7fdd6a\") " Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.128823 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-utilities" (OuterVolumeSpecName: "utilities") pod "492fcdac-bd79-4e38-b445-f3698c7fdd6a" (UID: "492fcdac-bd79-4e38-b445-f3698c7fdd6a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.133537 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/492fcdac-bd79-4e38-b445-f3698c7fdd6a-kube-api-access-sv7cr" (OuterVolumeSpecName: "kube-api-access-sv7cr") pod "492fcdac-bd79-4e38-b445-f3698c7fdd6a" (UID: "492fcdac-bd79-4e38-b445-f3698c7fdd6a"). InnerVolumeSpecName "kube-api-access-sv7cr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.229723 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.229754 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sv7cr\" (UniqueName: \"kubernetes.io/projected/492fcdac-bd79-4e38-b445-f3698c7fdd6a-kube-api-access-sv7cr\") on node \"crc\" DevicePath \"\"" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.376780 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "492fcdac-bd79-4e38-b445-f3698c7fdd6a" (UID: "492fcdac-bd79-4e38-b445-f3698c7fdd6a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.433772 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/492fcdac-bd79-4e38-b445-f3698c7fdd6a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.576179 4943 generic.go:334] "Generic (PLEG): container finished" podID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerID="ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db" exitCode=0 Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.576217 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7nt9" event={"ID":"492fcdac-bd79-4e38-b445-f3698c7fdd6a","Type":"ContainerDied","Data":"ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db"} Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.576270 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w7nt9" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.576289 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7nt9" event={"ID":"492fcdac-bd79-4e38-b445-f3698c7fdd6a","Type":"ContainerDied","Data":"8b9e6e5e0a329f869772bd79f3ba2075eecde4de323d9df1da99a170a55f5ac5"} Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.576325 4943 scope.go:117] "RemoveContainer" containerID="ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.620208 4943 scope.go:117] "RemoveContainer" containerID="c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.624777 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w7nt9"] Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.635758 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w7nt9"] Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.646944 4943 scope.go:117] "RemoveContainer" containerID="0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.701848 4943 scope.go:117] "RemoveContainer" containerID="ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db" Nov 29 08:23:23 crc kubenswrapper[4943]: E1129 08:23:23.702332 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db\": container with ID starting with ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db not found: ID does not exist" containerID="ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.702395 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db"} err="failed to get container status \"ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db\": rpc error: code = NotFound desc = could not find container \"ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db\": container with ID starting with ed63c872bdee649e80ccb1628ec8e5c6aaab56e596827d300d1f9b7b1b0286db not found: ID does not exist" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.702429 4943 scope.go:117] "RemoveContainer" containerID="c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0" Nov 29 08:23:23 crc kubenswrapper[4943]: E1129 08:23:23.702823 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0\": container with ID starting with c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0 not found: ID does not exist" containerID="c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.702921 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0"} err="failed to get container status \"c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0\": rpc error: code = NotFound desc = could not find 
container \"c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0\": container with ID starting with c7bd8d0fc25cb07cec2eef22283c1c5b953540cdd53c28210575b2f3f1ad3ac0 not found: ID does not exist" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.702974 4943 scope.go:117] "RemoveContainer" containerID="0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477" Nov 29 08:23:23 crc kubenswrapper[4943]: E1129 08:23:23.703298 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477\": container with ID starting with 0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477 not found: ID does not exist" containerID="0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477" Nov 29 08:23:23 crc kubenswrapper[4943]: I1129 08:23:23.703326 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477"} err="failed to get container status \"0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477\": rpc error: code = NotFound desc = could not find container \"0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477\": container with ID starting with 0b574368e3452ea6e40348de5cf6870d9d5e28256459150f4576f5b51f313477 not found: ID does not exist" Nov 29 08:23:25 crc kubenswrapper[4943]: I1129 08:23:25.337666 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" path="/var/lib/kubelet/pods/492fcdac-bd79-4e38-b445-f3698c7fdd6a/volumes" Nov 29 08:24:02 crc kubenswrapper[4943]: I1129 08:24:02.614172 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:24:02 crc kubenswrapper[4943]: I1129 08:24:02.614817 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:24:03 crc kubenswrapper[4943]: I1129 08:24:03.922977 4943 generic.go:334] "Generic (PLEG): container finished" podID="19a81291-2c8c-4099-a701-7f4049f4e890" containerID="85ab34ab73b8e4a588fa3558fe8ef8bae3913ebea2c506b72e377a7d8f9ce45a" exitCode=1 Nov 29 08:24:03 crc kubenswrapper[4943]: I1129 08:24:03.923111 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"19a81291-2c8c-4099-a701-7f4049f4e890","Type":"ContainerDied","Data":"85ab34ab73b8e4a588fa3558fe8ef8bae3913ebea2c506b72e377a7d8f9ce45a"} Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.429044 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.492727 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-workdir\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.492827 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m94cs\" (UniqueName: \"kubernetes.io/projected/19a81291-2c8c-4099-a701-7f4049f4e890-kube-api-access-m94cs\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.492882 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-config-data\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.492913 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.492999 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ssh-key\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.493072 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config-secret\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.493097 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-temporary\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.493113 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.493144 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ca-certs\") pod \"19a81291-2c8c-4099-a701-7f4049f4e890\" (UID: \"19a81291-2c8c-4099-a701-7f4049f4e890\") " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.493914 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-temporary" (OuterVolumeSpecName: 
"test-operator-ephemeral-temporary") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.494489 4943 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.494554 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-config-data" (OuterVolumeSpecName: "config-data") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.497419 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.500799 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19a81291-2c8c-4099-a701-7f4049f4e890-kube-api-access-m94cs" (OuterVolumeSpecName: "kube-api-access-m94cs") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "kube-api-access-m94cs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.508956 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "test-operator-logs") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.521983 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.522357 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.524277 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.541396 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "19a81291-2c8c-4099-a701-7f4049f4e890" (UID: "19a81291-2c8c-4099-a701-7f4049f4e890"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.596155 4943 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.596186 4943 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.596196 4943 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/19a81291-2c8c-4099-a701-7f4049f4e890-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.596207 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m94cs\" (UniqueName: \"kubernetes.io/projected/19a81291-2c8c-4099-a701-7f4049f4e890-kube-api-access-m94cs\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.596218 4943 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.596226 4943 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.596234 4943 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.596242 4943 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19a81291-2c8c-4099-a701-7f4049f4e890-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.614540 4943 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.697466 4943 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.942289 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"19a81291-2c8c-4099-a701-7f4049f4e890","Type":"ContainerDied","Data":"0aa5280ba75ee13e9eb1d7bc93800a2a2da2ff251c9d22333844caacf3d45acb"} Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.942532 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0aa5280ba75ee13e9eb1d7bc93800a2a2da2ff251c9d22333844caacf3d45acb" Nov 29 08:24:05 crc kubenswrapper[4943]: I1129 08:24:05.942374 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.921695 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 29 08:24:16 crc kubenswrapper[4943]: E1129 08:24:16.922681 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerName="registry-server" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.922697 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerName="registry-server" Nov 29 08:24:16 crc kubenswrapper[4943]: E1129 08:24:16.922724 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerName="extract-content" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.922732 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerName="extract-content" Nov 29 08:24:16 crc kubenswrapper[4943]: E1129 08:24:16.922769 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerName="extract-utilities" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.922778 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerName="extract-utilities" Nov 29 08:24:16 crc kubenswrapper[4943]: E1129 08:24:16.922788 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a81291-2c8c-4099-a701-7f4049f4e890" containerName="tempest-tests-tempest-tests-runner" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.922797 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a81291-2c8c-4099-a701-7f4049f4e890" containerName="tempest-tests-tempest-tests-runner" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.923006 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a81291-2c8c-4099-a701-7f4049f4e890" containerName="tempest-tests-tempest-tests-runner" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.923046 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="492fcdac-bd79-4e38-b445-f3698c7fdd6a" containerName="registry-server" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.923872 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.926725 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-9gct7" Nov 29 08:24:16 crc kubenswrapper[4943]: I1129 08:24:16.936090 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 29 08:24:17 crc kubenswrapper[4943]: I1129 08:24:17.119132 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"346fe3c9-1f3c-487e-9e9d-b10d780a7d11\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:17 crc kubenswrapper[4943]: I1129 08:24:17.119243 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z64dl\" (UniqueName: \"kubernetes.io/projected/346fe3c9-1f3c-487e-9e9d-b10d780a7d11-kube-api-access-z64dl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"346fe3c9-1f3c-487e-9e9d-b10d780a7d11\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:17 crc kubenswrapper[4943]: I1129 08:24:17.222230 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z64dl\" (UniqueName: \"kubernetes.io/projected/346fe3c9-1f3c-487e-9e9d-b10d780a7d11-kube-api-access-z64dl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"346fe3c9-1f3c-487e-9e9d-b10d780a7d11\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:17 crc kubenswrapper[4943]: I1129 08:24:17.222437 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"346fe3c9-1f3c-487e-9e9d-b10d780a7d11\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:17 crc kubenswrapper[4943]: I1129 08:24:17.223077 4943 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"346fe3c9-1f3c-487e-9e9d-b10d780a7d11\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:17 crc kubenswrapper[4943]: I1129 08:24:17.253242 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z64dl\" (UniqueName: \"kubernetes.io/projected/346fe3c9-1f3c-487e-9e9d-b10d780a7d11-kube-api-access-z64dl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"346fe3c9-1f3c-487e-9e9d-b10d780a7d11\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:17 crc kubenswrapper[4943]: I1129 08:24:17.257036 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"346fe3c9-1f3c-487e-9e9d-b10d780a7d11\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:17 crc 
kubenswrapper[4943]: I1129 08:24:17.544122 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 08:24:17 crc kubenswrapper[4943]: I1129 08:24:17.990411 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 29 08:24:17 crc kubenswrapper[4943]: W1129 08:24:17.993512 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod346fe3c9_1f3c_487e_9e9d_b10d780a7d11.slice/crio-232e5a628fb3f4bbb90476ce66a21f6c48a9050308d89c00bf4270c0f58651c0 WatchSource:0}: Error finding container 232e5a628fb3f4bbb90476ce66a21f6c48a9050308d89c00bf4270c0f58651c0: Status 404 returned error can't find the container with id 232e5a628fb3f4bbb90476ce66a21f6c48a9050308d89c00bf4270c0f58651c0 Nov 29 08:24:18 crc kubenswrapper[4943]: I1129 08:24:18.052245 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"346fe3c9-1f3c-487e-9e9d-b10d780a7d11","Type":"ContainerStarted","Data":"232e5a628fb3f4bbb90476ce66a21f6c48a9050308d89c00bf4270c0f58651c0"} Nov 29 08:24:20 crc kubenswrapper[4943]: I1129 08:24:20.070077 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"346fe3c9-1f3c-487e-9e9d-b10d780a7d11","Type":"ContainerStarted","Data":"465eab55941090626f0f15c25651def3d84d79b3152327fdde42df5d53417a53"} Nov 29 08:24:20 crc kubenswrapper[4943]: I1129 08:24:20.092490 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=3.039978629 podStartE2EDuration="4.092470453s" podCreationTimestamp="2025-11-29 08:24:16 +0000 UTC" firstStartedPulling="2025-11-29 08:24:17.998054316 +0000 UTC m=+6632.928143079" lastFinishedPulling="2025-11-29 08:24:19.05054615 +0000 UTC m=+6633.980634903" observedRunningTime="2025-11-29 08:24:20.085326641 +0000 UTC m=+6635.015415384" watchObservedRunningTime="2025-11-29 08:24:20.092470453 +0000 UTC m=+6635.022559206" Nov 29 08:24:32 crc kubenswrapper[4943]: I1129 08:24:32.613405 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:24:32 crc kubenswrapper[4943]: I1129 08:24:32.613959 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.021185 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-q4cgs/must-gather-khqct"] Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.023294 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.025342 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-q4cgs"/"kube-root-ca.crt" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.025918 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-q4cgs"/"openshift-service-ca.crt" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.026579 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-q4cgs"/"default-dockercfg-ljtxs" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.035857 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-q4cgs/must-gather-khqct"] Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.150391 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fwdd\" (UniqueName: \"kubernetes.io/projected/60e4522f-8e82-468d-8541-e0ae3d31456a-kube-api-access-4fwdd\") pod \"must-gather-khqct\" (UID: \"60e4522f-8e82-468d-8541-e0ae3d31456a\") " pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.150446 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/60e4522f-8e82-468d-8541-e0ae3d31456a-must-gather-output\") pod \"must-gather-khqct\" (UID: \"60e4522f-8e82-468d-8541-e0ae3d31456a\") " pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.252489 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/60e4522f-8e82-468d-8541-e0ae3d31456a-must-gather-output\") pod \"must-gather-khqct\" (UID: \"60e4522f-8e82-468d-8541-e0ae3d31456a\") " pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.252740 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fwdd\" (UniqueName: \"kubernetes.io/projected/60e4522f-8e82-468d-8541-e0ae3d31456a-kube-api-access-4fwdd\") pod \"must-gather-khqct\" (UID: \"60e4522f-8e82-468d-8541-e0ae3d31456a\") " pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.253149 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/60e4522f-8e82-468d-8541-e0ae3d31456a-must-gather-output\") pod \"must-gather-khqct\" (UID: \"60e4522f-8e82-468d-8541-e0ae3d31456a\") " pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.273522 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fwdd\" (UniqueName: \"kubernetes.io/projected/60e4522f-8e82-468d-8541-e0ae3d31456a-kube-api-access-4fwdd\") pod \"must-gather-khqct\" (UID: \"60e4522f-8e82-468d-8541-e0ae3d31456a\") " pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.340251 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.805873 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-q4cgs/must-gather-khqct"] Nov 29 08:25:00 crc kubenswrapper[4943]: I1129 08:25:00.815631 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 08:25:01 crc kubenswrapper[4943]: I1129 08:25:01.480832 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/must-gather-khqct" event={"ID":"60e4522f-8e82-468d-8541-e0ae3d31456a","Type":"ContainerStarted","Data":"55458db268d71ca5a807d646db955bd429642822adb4e5f0992a706ca9071ef0"} Nov 29 08:25:02 crc kubenswrapper[4943]: I1129 08:25:02.613278 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:25:02 crc kubenswrapper[4943]: I1129 08:25:02.613632 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:25:02 crc kubenswrapper[4943]: I1129 08:25:02.613687 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 08:25:02 crc kubenswrapper[4943]: I1129 08:25:02.614516 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 08:25:02 crc kubenswrapper[4943]: I1129 08:25:02.614598 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" gracePeriod=600 Nov 29 08:25:02 crc kubenswrapper[4943]: E1129 08:25:02.738128 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:25:03 crc kubenswrapper[4943]: I1129 08:25:03.497865 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" exitCode=0 Nov 29 08:25:03 crc kubenswrapper[4943]: I1129 08:25:03.497921 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" 
event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a"} Nov 29 08:25:03 crc kubenswrapper[4943]: I1129 08:25:03.498240 4943 scope.go:117] "RemoveContainer" containerID="d37250ae681cba879967783a99876c5526a6a4dbf946dee72f1c200f987fa32a" Nov 29 08:25:03 crc kubenswrapper[4943]: I1129 08:25:03.498885 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:25:03 crc kubenswrapper[4943]: E1129 08:25:03.499196 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:25:05 crc kubenswrapper[4943]: I1129 08:25:05.521333 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/must-gather-khqct" event={"ID":"60e4522f-8e82-468d-8541-e0ae3d31456a","Type":"ContainerStarted","Data":"c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb"} Nov 29 08:25:05 crc kubenswrapper[4943]: I1129 08:25:05.521874 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/must-gather-khqct" event={"ID":"60e4522f-8e82-468d-8541-e0ae3d31456a","Type":"ContainerStarted","Data":"5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5"} Nov 29 08:25:05 crc kubenswrapper[4943]: I1129 08:25:05.547611 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-q4cgs/must-gather-khqct" podStartSLOduration=1.976863708 podStartE2EDuration="5.547560191s" podCreationTimestamp="2025-11-29 08:25:00 +0000 UTC" firstStartedPulling="2025-11-29 08:25:00.815396915 +0000 UTC m=+6675.745485668" lastFinishedPulling="2025-11-29 08:25:04.386093408 +0000 UTC m=+6679.316182151" observedRunningTime="2025-11-29 08:25:05.533527604 +0000 UTC m=+6680.463616367" watchObservedRunningTime="2025-11-29 08:25:05.547560191 +0000 UTC m=+6680.477648964" Nov 29 08:25:07 crc kubenswrapper[4943]: E1129 08:25:07.270332 4943 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.148:44776->38.102.83.148:44389: read tcp 38.102.83.148:44776->38.102.83.148:44389: read: connection reset by peer Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.516988 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-ml4l9"] Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.519092 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.637877 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92fb114e-913c-490c-8c04-cf764f0abb4e-host\") pod \"crc-debug-ml4l9\" (UID: \"92fb114e-913c-490c-8c04-cf764f0abb4e\") " pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.637994 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqmh2\" (UniqueName: \"kubernetes.io/projected/92fb114e-913c-490c-8c04-cf764f0abb4e-kube-api-access-kqmh2\") pod \"crc-debug-ml4l9\" (UID: \"92fb114e-913c-490c-8c04-cf764f0abb4e\") " pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.739643 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqmh2\" (UniqueName: \"kubernetes.io/projected/92fb114e-913c-490c-8c04-cf764f0abb4e-kube-api-access-kqmh2\") pod \"crc-debug-ml4l9\" (UID: \"92fb114e-913c-490c-8c04-cf764f0abb4e\") " pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.739813 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92fb114e-913c-490c-8c04-cf764f0abb4e-host\") pod \"crc-debug-ml4l9\" (UID: \"92fb114e-913c-490c-8c04-cf764f0abb4e\") " pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.739896 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92fb114e-913c-490c-8c04-cf764f0abb4e-host\") pod \"crc-debug-ml4l9\" (UID: \"92fb114e-913c-490c-8c04-cf764f0abb4e\") " pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.770467 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqmh2\" (UniqueName: \"kubernetes.io/projected/92fb114e-913c-490c-8c04-cf764f0abb4e-kube-api-access-kqmh2\") pod \"crc-debug-ml4l9\" (UID: \"92fb114e-913c-490c-8c04-cf764f0abb4e\") " pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:25:08 crc kubenswrapper[4943]: I1129 08:25:08.836883 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:25:09 crc kubenswrapper[4943]: I1129 08:25:09.553871 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" event={"ID":"92fb114e-913c-490c-8c04-cf764f0abb4e","Type":"ContainerStarted","Data":"c394afbbda3d7fcd2c8552d2ee5a01c5d2d5dacb27e91d6a559f204de3beab5a"} Nov 29 08:25:16 crc kubenswrapper[4943]: I1129 08:25:16.327941 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:25:16 crc kubenswrapper[4943]: E1129 08:25:16.328689 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:25:19 crc kubenswrapper[4943]: I1129 08:25:19.636529 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" event={"ID":"92fb114e-913c-490c-8c04-cf764f0abb4e","Type":"ContainerStarted","Data":"aa727089b0f93873e04dbd69e064e0e2552a53c1f1797c47ee7dafa29a6b4f60"} Nov 29 08:25:19 crc kubenswrapper[4943]: I1129 08:25:19.650315 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" podStartSLOduration=1.7784182039999998 podStartE2EDuration="11.65030001s" podCreationTimestamp="2025-11-29 08:25:08 +0000 UTC" firstStartedPulling="2025-11-29 08:25:08.882305026 +0000 UTC m=+6683.812393789" lastFinishedPulling="2025-11-29 08:25:18.754186842 +0000 UTC m=+6693.684275595" observedRunningTime="2025-11-29 08:25:19.648616399 +0000 UTC m=+6694.578705152" watchObservedRunningTime="2025-11-29 08:25:19.65030001 +0000 UTC m=+6694.580388763" Nov 29 08:25:31 crc kubenswrapper[4943]: I1129 08:25:31.327865 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:25:31 crc kubenswrapper[4943]: E1129 08:25:31.328891 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:25:44 crc kubenswrapper[4943]: I1129 08:25:44.328064 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:25:44 crc kubenswrapper[4943]: E1129 08:25:44.330368 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:25:58 crc kubenswrapper[4943]: I1129 08:25:58.327842 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 
08:25:58 crc kubenswrapper[4943]: E1129 08:25:58.328843 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:26:03 crc kubenswrapper[4943]: I1129 08:26:03.017346 4943 generic.go:334] "Generic (PLEG): container finished" podID="92fb114e-913c-490c-8c04-cf764f0abb4e" containerID="aa727089b0f93873e04dbd69e064e0e2552a53c1f1797c47ee7dafa29a6b4f60" exitCode=0 Nov 29 08:26:03 crc kubenswrapper[4943]: I1129 08:26:03.017473 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" event={"ID":"92fb114e-913c-490c-8c04-cf764f0abb4e","Type":"ContainerDied","Data":"aa727089b0f93873e04dbd69e064e0e2552a53c1f1797c47ee7dafa29a6b4f60"} Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.145430 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.178013 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-ml4l9"] Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.185639 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-ml4l9"] Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.222494 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92fb114e-913c-490c-8c04-cf764f0abb4e-host\") pod \"92fb114e-913c-490c-8c04-cf764f0abb4e\" (UID: \"92fb114e-913c-490c-8c04-cf764f0abb4e\") " Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.222624 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqmh2\" (UniqueName: \"kubernetes.io/projected/92fb114e-913c-490c-8c04-cf764f0abb4e-kube-api-access-kqmh2\") pod \"92fb114e-913c-490c-8c04-cf764f0abb4e\" (UID: \"92fb114e-913c-490c-8c04-cf764f0abb4e\") " Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.222739 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/92fb114e-913c-490c-8c04-cf764f0abb4e-host" (OuterVolumeSpecName: "host") pod "92fb114e-913c-490c-8c04-cf764f0abb4e" (UID: "92fb114e-913c-490c-8c04-cf764f0abb4e"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.223176 4943 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/92fb114e-913c-490c-8c04-cf764f0abb4e-host\") on node \"crc\" DevicePath \"\"" Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.228086 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92fb114e-913c-490c-8c04-cf764f0abb4e-kube-api-access-kqmh2" (OuterVolumeSpecName: "kube-api-access-kqmh2") pod "92fb114e-913c-490c-8c04-cf764f0abb4e" (UID: "92fb114e-913c-490c-8c04-cf764f0abb4e"). InnerVolumeSpecName "kube-api-access-kqmh2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:26:04 crc kubenswrapper[4943]: I1129 08:26:04.325041 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqmh2\" (UniqueName: \"kubernetes.io/projected/92fb114e-913c-490c-8c04-cf764f0abb4e-kube-api-access-kqmh2\") on node \"crc\" DevicePath \"\"" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.039536 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c394afbbda3d7fcd2c8552d2ee5a01c5d2d5dacb27e91d6a559f204de3beab5a" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.039662 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-ml4l9" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.341224 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92fb114e-913c-490c-8c04-cf764f0abb4e" path="/var/lib/kubelet/pods/92fb114e-913c-490c-8c04-cf764f0abb4e/volumes" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.386261 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-nfl8k"] Nov 29 08:26:05 crc kubenswrapper[4943]: E1129 08:26:05.386725 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92fb114e-913c-490c-8c04-cf764f0abb4e" containerName="container-00" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.386749 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="92fb114e-913c-490c-8c04-cf764f0abb4e" containerName="container-00" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.387024 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="92fb114e-913c-490c-8c04-cf764f0abb4e" containerName="container-00" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.387748 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.448264 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmg8f\" (UniqueName: \"kubernetes.io/projected/0a2d8f64-ec63-4dc3-a253-3e8269df710b-kube-api-access-lmg8f\") pod \"crc-debug-nfl8k\" (UID: \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\") " pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.448402 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a2d8f64-ec63-4dc3-a253-3e8269df710b-host\") pod \"crc-debug-nfl8k\" (UID: \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\") " pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.550472 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a2d8f64-ec63-4dc3-a253-3e8269df710b-host\") pod \"crc-debug-nfl8k\" (UID: \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\") " pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.550660 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmg8f\" (UniqueName: \"kubernetes.io/projected/0a2d8f64-ec63-4dc3-a253-3e8269df710b-kube-api-access-lmg8f\") pod \"crc-debug-nfl8k\" (UID: \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\") " pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.550944 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a2d8f64-ec63-4dc3-a253-3e8269df710b-host\") pod \"crc-debug-nfl8k\" (UID: \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\") " pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.569682 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmg8f\" (UniqueName: \"kubernetes.io/projected/0a2d8f64-ec63-4dc3-a253-3e8269df710b-kube-api-access-lmg8f\") pod \"crc-debug-nfl8k\" (UID: \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\") " pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:05 crc kubenswrapper[4943]: I1129 08:26:05.709644 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:06 crc kubenswrapper[4943]: I1129 08:26:06.051818 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" event={"ID":"0a2d8f64-ec63-4dc3-a253-3e8269df710b","Type":"ContainerStarted","Data":"e33d1412a89893d73b888ed392252de7e7556b6ff8d9820bc4709033d750275a"} Nov 29 08:26:06 crc kubenswrapper[4943]: I1129 08:26:06.052515 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" event={"ID":"0a2d8f64-ec63-4dc3-a253-3e8269df710b","Type":"ContainerStarted","Data":"23a93e787aab0f80919c85f69d3fca6a933b7156b3cabdfcdaec4d25c8f4ced1"} Nov 29 08:26:06 crc kubenswrapper[4943]: I1129 08:26:06.088021 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" podStartSLOduration=1.088001274 podStartE2EDuration="1.088001274s" podCreationTimestamp="2025-11-29 08:26:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 08:26:06.083655409 +0000 UTC m=+6741.013744172" watchObservedRunningTime="2025-11-29 08:26:06.088001274 +0000 UTC m=+6741.018090037" Nov 29 08:26:07 crc kubenswrapper[4943]: I1129 08:26:07.062685 4943 generic.go:334] "Generic (PLEG): container finished" podID="0a2d8f64-ec63-4dc3-a253-3e8269df710b" containerID="e33d1412a89893d73b888ed392252de7e7556b6ff8d9820bc4709033d750275a" exitCode=0 Nov 29 08:26:07 crc kubenswrapper[4943]: I1129 08:26:07.062733 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" event={"ID":"0a2d8f64-ec63-4dc3-a253-3e8269df710b","Type":"ContainerDied","Data":"e33d1412a89893d73b888ed392252de7e7556b6ff8d9820bc4709033d750275a"} Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.169812 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.203742 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a2d8f64-ec63-4dc3-a253-3e8269df710b-host\") pod \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\" (UID: \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\") " Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.203802 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmg8f\" (UniqueName: \"kubernetes.io/projected/0a2d8f64-ec63-4dc3-a253-3e8269df710b-kube-api-access-lmg8f\") pod \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\" (UID: \"0a2d8f64-ec63-4dc3-a253-3e8269df710b\") " Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.204317 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a2d8f64-ec63-4dc3-a253-3e8269df710b-host" (OuterVolumeSpecName: "host") pod "0a2d8f64-ec63-4dc3-a253-3e8269df710b" (UID: "0a2d8f64-ec63-4dc3-a253-3e8269df710b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.209339 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a2d8f64-ec63-4dc3-a253-3e8269df710b-kube-api-access-lmg8f" (OuterVolumeSpecName: "kube-api-access-lmg8f") pod "0a2d8f64-ec63-4dc3-a253-3e8269df710b" (UID: "0a2d8f64-ec63-4dc3-a253-3e8269df710b"). 
InnerVolumeSpecName "kube-api-access-lmg8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.304954 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmg8f\" (UniqueName: \"kubernetes.io/projected/0a2d8f64-ec63-4dc3-a253-3e8269df710b-kube-api-access-lmg8f\") on node \"crc\" DevicePath \"\"" Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.304987 4943 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a2d8f64-ec63-4dc3-a253-3e8269df710b-host\") on node \"crc\" DevicePath \"\"" Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.868807 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-nfl8k"] Nov 29 08:26:08 crc kubenswrapper[4943]: I1129 08:26:08.885415 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-nfl8k"] Nov 29 08:26:09 crc kubenswrapper[4943]: I1129 08:26:09.084668 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23a93e787aab0f80919c85f69d3fca6a933b7156b3cabdfcdaec4d25c8f4ced1" Nov 29 08:26:09 crc kubenswrapper[4943]: I1129 08:26:09.084745 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-nfl8k" Nov 29 08:26:09 crc kubenswrapper[4943]: I1129 08:26:09.327284 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:26:09 crc kubenswrapper[4943]: E1129 08:26:09.327726 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:26:09 crc kubenswrapper[4943]: I1129 08:26:09.344825 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a2d8f64-ec63-4dc3-a253-3e8269df710b" path="/var/lib/kubelet/pods/0a2d8f64-ec63-4dc3-a253-3e8269df710b/volumes" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.019663 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-qzbw7"] Nov 29 08:26:10 crc kubenswrapper[4943]: E1129 08:26:10.020077 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a2d8f64-ec63-4dc3-a253-3e8269df710b" containerName="container-00" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.020098 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a2d8f64-ec63-4dc3-a253-3e8269df710b" containerName="container-00" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.020298 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a2d8f64-ec63-4dc3-a253-3e8269df710b" containerName="container-00" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.021016 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.075435 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp9dv\" (UniqueName: \"kubernetes.io/projected/6b73935b-d04c-42ff-9921-38133b298f26-kube-api-access-dp9dv\") pod \"crc-debug-qzbw7\" (UID: \"6b73935b-d04c-42ff-9921-38133b298f26\") " pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.075747 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6b73935b-d04c-42ff-9921-38133b298f26-host\") pod \"crc-debug-qzbw7\" (UID: \"6b73935b-d04c-42ff-9921-38133b298f26\") " pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.177536 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp9dv\" (UniqueName: \"kubernetes.io/projected/6b73935b-d04c-42ff-9921-38133b298f26-kube-api-access-dp9dv\") pod \"crc-debug-qzbw7\" (UID: \"6b73935b-d04c-42ff-9921-38133b298f26\") " pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.177654 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6b73935b-d04c-42ff-9921-38133b298f26-host\") pod \"crc-debug-qzbw7\" (UID: \"6b73935b-d04c-42ff-9921-38133b298f26\") " pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.177795 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6b73935b-d04c-42ff-9921-38133b298f26-host\") pod \"crc-debug-qzbw7\" (UID: \"6b73935b-d04c-42ff-9921-38133b298f26\") " pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.195737 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp9dv\" (UniqueName: \"kubernetes.io/projected/6b73935b-d04c-42ff-9921-38133b298f26-kube-api-access-dp9dv\") pod \"crc-debug-qzbw7\" (UID: \"6b73935b-d04c-42ff-9921-38133b298f26\") " pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:10 crc kubenswrapper[4943]: I1129 08:26:10.345259 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:10 crc kubenswrapper[4943]: W1129 08:26:10.379577 4943 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b73935b_d04c_42ff_9921_38133b298f26.slice/crio-58cf1765fb705dc92441ced841c889c777cdff81cb1e28fc990b9e253facb128 WatchSource:0}: Error finding container 58cf1765fb705dc92441ced841c889c777cdff81cb1e28fc990b9e253facb128: Status 404 returned error can't find the container with id 58cf1765fb705dc92441ced841c889c777cdff81cb1e28fc990b9e253facb128 Nov 29 08:26:11 crc kubenswrapper[4943]: I1129 08:26:11.107508 4943 generic.go:334] "Generic (PLEG): container finished" podID="6b73935b-d04c-42ff-9921-38133b298f26" containerID="84fb5c688acdbff089b3dad56105ecc08ec3ee6f2fc9c18d76c5f8255ff90601" exitCode=0 Nov 29 08:26:11 crc kubenswrapper[4943]: I1129 08:26:11.107603 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" event={"ID":"6b73935b-d04c-42ff-9921-38133b298f26","Type":"ContainerDied","Data":"84fb5c688acdbff089b3dad56105ecc08ec3ee6f2fc9c18d76c5f8255ff90601"} Nov 29 08:26:11 crc kubenswrapper[4943]: I1129 08:26:11.107917 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" event={"ID":"6b73935b-d04c-42ff-9921-38133b298f26","Type":"ContainerStarted","Data":"58cf1765fb705dc92441ced841c889c777cdff81cb1e28fc990b9e253facb128"} Nov 29 08:26:11 crc kubenswrapper[4943]: I1129 08:26:11.149864 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-qzbw7"] Nov 29 08:26:11 crc kubenswrapper[4943]: I1129 08:26:11.159369 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-q4cgs/crc-debug-qzbw7"] Nov 29 08:26:12 crc kubenswrapper[4943]: I1129 08:26:12.216642 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:12 crc kubenswrapper[4943]: I1129 08:26:12.417963 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6b73935b-d04c-42ff-9921-38133b298f26-host\") pod \"6b73935b-d04c-42ff-9921-38133b298f26\" (UID: \"6b73935b-d04c-42ff-9921-38133b298f26\") " Nov 29 08:26:12 crc kubenswrapper[4943]: I1129 08:26:12.418031 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dp9dv\" (UniqueName: \"kubernetes.io/projected/6b73935b-d04c-42ff-9921-38133b298f26-kube-api-access-dp9dv\") pod \"6b73935b-d04c-42ff-9921-38133b298f26\" (UID: \"6b73935b-d04c-42ff-9921-38133b298f26\") " Nov 29 08:26:12 crc kubenswrapper[4943]: I1129 08:26:12.418112 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6b73935b-d04c-42ff-9921-38133b298f26-host" (OuterVolumeSpecName: "host") pod "6b73935b-d04c-42ff-9921-38133b298f26" (UID: "6b73935b-d04c-42ff-9921-38133b298f26"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 08:26:12 crc kubenswrapper[4943]: I1129 08:26:12.418833 4943 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6b73935b-d04c-42ff-9921-38133b298f26-host\") on node \"crc\" DevicePath \"\"" Nov 29 08:26:12 crc kubenswrapper[4943]: I1129 08:26:12.424929 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b73935b-d04c-42ff-9921-38133b298f26-kube-api-access-dp9dv" (OuterVolumeSpecName: "kube-api-access-dp9dv") pod "6b73935b-d04c-42ff-9921-38133b298f26" (UID: "6b73935b-d04c-42ff-9921-38133b298f26"). InnerVolumeSpecName "kube-api-access-dp9dv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:26:12 crc kubenswrapper[4943]: I1129 08:26:12.521893 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dp9dv\" (UniqueName: \"kubernetes.io/projected/6b73935b-d04c-42ff-9921-38133b298f26-kube-api-access-dp9dv\") on node \"crc\" DevicePath \"\"" Nov 29 08:26:13 crc kubenswrapper[4943]: I1129 08:26:13.138152 4943 scope.go:117] "RemoveContainer" containerID="84fb5c688acdbff089b3dad56105ecc08ec3ee6f2fc9c18d76c5f8255ff90601" Nov 29 08:26:13 crc kubenswrapper[4943]: I1129 08:26:13.138423 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-q4cgs/crc-debug-qzbw7" Nov 29 08:26:13 crc kubenswrapper[4943]: I1129 08:26:13.342655 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b73935b-d04c-42ff-9921-38133b298f26" path="/var/lib/kubelet/pods/6b73935b-d04c-42ff-9921-38133b298f26/volumes" Nov 29 08:26:21 crc kubenswrapper[4943]: I1129 08:26:21.327638 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:26:21 crc kubenswrapper[4943]: E1129 08:26:21.328487 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:26:33 crc kubenswrapper[4943]: I1129 08:26:33.327933 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:26:33 crc kubenswrapper[4943]: E1129 08:26:33.328881 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:26:46 crc kubenswrapper[4943]: I1129 08:26:46.327411 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:26:46 crc kubenswrapper[4943]: E1129 08:26:46.328339 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:26:48 crc kubenswrapper[4943]: I1129 08:26:48.198996 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5648447994-lm7pg_444ebf94-f3eb-4f21-9a69-7730d465c3b6/barbican-api/0.log" Nov 29 08:26:48 crc kubenswrapper[4943]: I1129 08:26:48.444492 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5666c9f9fb-m5bqf_9e6b1af0-828e-4bc6-afc5-ae9728bf0f62/barbican-keystone-listener/0.log" Nov 29 08:26:48 crc kubenswrapper[4943]: I1129 08:26:48.687885 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5666c9f9fb-m5bqf_9e6b1af0-828e-4bc6-afc5-ae9728bf0f62/barbican-keystone-listener-log/0.log" Nov 29 08:26:48 crc kubenswrapper[4943]: I1129 08:26:48.713309 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-65f55968c5-wz8zv_09f8aed7-018b-4cc9-aead-9c93d1863e10/barbican-worker/0.log" Nov 29 08:26:48 crc kubenswrapper[4943]: I1129 08:26:48.899493 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-65f55968c5-wz8zv_09f8aed7-018b-4cc9-aead-9c93d1863e10/barbican-worker-log/0.log" Nov 29 08:26:49 crc kubenswrapper[4943]: I1129 08:26:49.038261 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-dplrg_dcee2b53-5ddc-4a3e-afac-ffe2812f24e4/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:49 crc kubenswrapper[4943]: I1129 08:26:49.135609 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5648447994-lm7pg_444ebf94-f3eb-4f21-9a69-7730d465c3b6/barbican-api-log/0.log" Nov 29 08:26:49 crc kubenswrapper[4943]: I1129 08:26:49.378314 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_969ecbf0-186b-4121-b56a-998af3fc2e9e/ceilometer-notification-agent/0.log" Nov 29 08:26:49 crc kubenswrapper[4943]: I1129 08:26:49.403364 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_969ecbf0-186b-4121-b56a-998af3fc2e9e/ceilometer-central-agent/0.log" Nov 29 08:26:49 crc kubenswrapper[4943]: I1129 08:26:49.419068 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_969ecbf0-186b-4121-b56a-998af3fc2e9e/proxy-httpd/0.log" Nov 29 08:26:49 crc kubenswrapper[4943]: I1129 08:26:49.526604 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_969ecbf0-186b-4121-b56a-998af3fc2e9e/sg-core/0.log" Nov 29 08:26:49 crc kubenswrapper[4943]: I1129 08:26:49.608207 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-p4twb_ab8b2f78-eaa4-468f-84c6-06dcd72cd5ff/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:49 crc kubenswrapper[4943]: I1129 08:26:49.712454 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-ll74l_5d32eb7f-e003-4ace-be60-a38ea0701477/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:50 crc kubenswrapper[4943]: I1129 08:26:50.244747 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_9bda2d0c-0a79-463c-a457-1dbaf300a6f9/probe/0.log" Nov 29 
08:26:50 crc kubenswrapper[4943]: I1129 08:26:50.602598 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c/cinder-api-log/0.log" Nov 29 08:26:50 crc kubenswrapper[4943]: I1129 08:26:50.612500 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_8943a5ae-79c8-4ea7-a4fb-eba8cb993f1c/cinder-api/0.log" Nov 29 08:26:50 crc kubenswrapper[4943]: I1129 08:26:50.784139 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_542148bb-e707-4112-bc40-5b2272056dd1/cinder-scheduler/0.log" Nov 29 08:26:50 crc kubenswrapper[4943]: I1129 08:26:50.891503 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_542148bb-e707-4112-bc40-5b2272056dd1/probe/0.log" Nov 29 08:26:51 crc kubenswrapper[4943]: I1129 08:26:51.143325 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_d77425c2-2838-4b1e-9847-c36e8228920e/probe/0.log" Nov 29 08:26:51 crc kubenswrapper[4943]: I1129 08:26:51.412049 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-486w8_e3504f2d-9001-4652-8404-4f2ac4265eef/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:51 crc kubenswrapper[4943]: I1129 08:26:51.547117 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-gvz8b_fa69db4d-9f15-4890-bf12-437377c8f3a7/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:51 crc kubenswrapper[4943]: I1129 08:26:51.684325 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-mtv4l_b383eee2-b374-463b-8cdc-429b84772fcf/init/0.log" Nov 29 08:26:51 crc kubenswrapper[4943]: I1129 08:26:51.819298 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-mtv4l_b383eee2-b374-463b-8cdc-429b84772fcf/init/0.log" Nov 29 08:26:52 crc kubenswrapper[4943]: I1129 08:26:52.002599 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-mtv4l_b383eee2-b374-463b-8cdc-429b84772fcf/dnsmasq-dns/0.log" Nov 29 08:26:52 crc kubenswrapper[4943]: I1129 08:26:52.093801 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_34631d9e-04d9-4560-9535-16ae6c60da19/glance-httpd/0.log" Nov 29 08:26:52 crc kubenswrapper[4943]: I1129 08:26:52.171833 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_34631d9e-04d9-4560-9535-16ae6c60da19/glance-log/0.log" Nov 29 08:26:52 crc kubenswrapper[4943]: I1129 08:26:52.340779 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_82f5ccb7-ce13-4e07-9852-be76cbb9dda6/glance-log/0.log" Nov 29 08:26:52 crc kubenswrapper[4943]: I1129 08:26:52.409699 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_82f5ccb7-ce13-4e07-9852-be76cbb9dda6/glance-httpd/0.log" Nov 29 08:26:52 crc kubenswrapper[4943]: I1129 08:26:52.698170 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-68cf6db5b8-5ckcr_d7af4d3f-647b-437d-8ab3-d8bb4debb25a/horizon/0.log" Nov 29 08:26:52 crc kubenswrapper[4943]: I1129 08:26:52.829229 4943 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cinder-backup-0_9bda2d0c-0a79-463c-a457-1dbaf300a6f9/cinder-backup/0.log" Nov 29 08:26:52 crc kubenswrapper[4943]: I1129 08:26:52.983094 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-gwj89_718531e1-453b-4e16-a497-d3af7c97b9ed/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:53 crc kubenswrapper[4943]: I1129 08:26:53.089755 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-68cf6db5b8-5ckcr_d7af4d3f-647b-437d-8ab3-d8bb4debb25a/horizon-log/0.log" Nov 29 08:26:53 crc kubenswrapper[4943]: I1129 08:26:53.253669 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-q7flv_dfa047b7-dae4-49f4-9cfd-5e9492f3620f/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:53 crc kubenswrapper[4943]: I1129 08:26:53.611524 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29406721-ljt9k_ed4ca698-c9b4-458e-bf3a-ffba01a3c728/keystone-cron/0.log" Nov 29 08:26:53 crc kubenswrapper[4943]: I1129 08:26:53.747488 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_192defe0-3319-458c-b5d6-220b1a641b88/kube-state-metrics/0.log" Nov 29 08:26:53 crc kubenswrapper[4943]: I1129 08:26:53.970082 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-42f5w_40fddfb3-2d70-4d01-9da7-9c2a718c4962/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:54 crc kubenswrapper[4943]: I1129 08:26:54.181709 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_128c6e68-82d5-4b0b-9734-3f86bd29385e/manila-api-log/0.log" Nov 29 08:26:54 crc kubenswrapper[4943]: I1129 08:26:54.231464 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_128c6e68-82d5-4b0b-9734-3f86bd29385e/manila-api/0.log" Nov 29 08:26:54 crc kubenswrapper[4943]: I1129 08:26:54.429259 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_128756d3-2ba0-493a-8ee8-c33f6312be8f/probe/0.log" Nov 29 08:26:54 crc kubenswrapper[4943]: I1129 08:26:54.436673 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6f668984b9-kxcdm_6d5336ed-f3f6-4d44-9f80-d6b00646c97d/keystone-api/0.log" Nov 29 08:26:54 crc kubenswrapper[4943]: I1129 08:26:54.498869 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_128756d3-2ba0-493a-8ee8-c33f6312be8f/manila-scheduler/0.log" Nov 29 08:26:54 crc kubenswrapper[4943]: I1129 08:26:54.663680 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_52fc0d8a-63e5-4950-8dbd-88d88e77c913/probe/0.log" Nov 29 08:26:54 crc kubenswrapper[4943]: I1129 08:26:54.696020 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_52fc0d8a-63e5-4950-8dbd-88d88e77c913/manila-share/0.log" Nov 29 08:26:55 crc kubenswrapper[4943]: I1129 08:26:55.211603 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-64478cdc57-fhzpm_1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd/neutron-httpd/0.log" Nov 29 08:26:55 crc kubenswrapper[4943]: I1129 08:26:55.321863 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-64478cdc57-fhzpm_1ff8ee17-b659-4a6e-8a68-ae4e4e6610cd/neutron-api/0.log" Nov 29 08:26:55 crc 
kubenswrapper[4943]: I1129 08:26:55.392943 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-vvtpk_5d1fa725-e02d-4a45-b50b-2879d165555b/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:56 crc kubenswrapper[4943]: I1129 08:26:56.096470 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_3e86cce9-f739-49de-8b00-6ad1ae54d725/nova-cell0-conductor-conductor/0.log" Nov 29 08:26:56 crc kubenswrapper[4943]: I1129 08:26:56.306214 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_7a73d989-5ec4-4b31-a47b-7b9bcda756cf/nova-api-log/0.log" Nov 29 08:26:56 crc kubenswrapper[4943]: I1129 08:26:56.614725 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_25ce347f-5a2e-4fc1-81c2-d4b9e1a58a73/nova-cell1-conductor-conductor/0.log" Nov 29 08:26:56 crc kubenswrapper[4943]: I1129 08:26:56.866157 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_9511ac38-5f33-48e0-a59a-197e253fbc8e/nova-cell1-novncproxy-novncproxy/0.log" Nov 29 08:26:56 crc kubenswrapper[4943]: I1129 08:26:56.874215 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_7a73d989-5ec4-4b31-a47b-7b9bcda756cf/nova-api-api/0.log" Nov 29 08:26:57 crc kubenswrapper[4943]: I1129 08:26:57.085831 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-67tgw_daf70ffe-3569-4332-8140-3cbaaa3d8db9/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:26:57 crc kubenswrapper[4943]: I1129 08:26:57.295276 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_6a8c1a66-0365-4e4a-8c6c-6721a50e7468/nova-metadata-log/0.log" Nov 29 08:26:57 crc kubenswrapper[4943]: I1129 08:26:57.852997 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_c35e524d-9a33-483b-a6d8-98a7ace4b632/nova-scheduler-scheduler/0.log" Nov 29 08:26:57 crc kubenswrapper[4943]: I1129 08:26:57.957777 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a756fe5f-1037-4ec3-b91a-fdce5d723f04/mysql-bootstrap/0.log" Nov 29 08:26:58 crc kubenswrapper[4943]: I1129 08:26:58.186735 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a756fe5f-1037-4ec3-b91a-fdce5d723f04/mysql-bootstrap/0.log" Nov 29 08:26:58 crc kubenswrapper[4943]: I1129 08:26:58.222260 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a756fe5f-1037-4ec3-b91a-fdce5d723f04/galera/0.log" Nov 29 08:26:58 crc kubenswrapper[4943]: I1129 08:26:58.445489 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_57b1dafb-1dfa-4f23-8335-50600bc5becb/mysql-bootstrap/0.log" Nov 29 08:26:58 crc kubenswrapper[4943]: I1129 08:26:58.609425 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_57b1dafb-1dfa-4f23-8335-50600bc5becb/mysql-bootstrap/0.log" Nov 29 08:26:58 crc kubenswrapper[4943]: I1129 08:26:58.704778 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_57b1dafb-1dfa-4f23-8335-50600bc5becb/galera/0.log" Nov 29 08:26:58 crc kubenswrapper[4943]: I1129 08:26:58.899424 4943 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstackclient_21d04d3f-3885-4bd5-a28a-7539ab86bf24/openstackclient/0.log" Nov 29 08:26:59 crc kubenswrapper[4943]: I1129 08:26:59.094854 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-5gpxg_af59b739-6a81-44bf-a1f6-2d6d3038c43f/ovn-controller/0.log" Nov 29 08:26:59 crc kubenswrapper[4943]: I1129 08:26:59.315924 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-k47vk_2736d08a-dfca-4da2-bae5-7917c31200c1/openstack-network-exporter/0.log" Nov 29 08:26:59 crc kubenswrapper[4943]: I1129 08:26:59.327111 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:26:59 crc kubenswrapper[4943]: E1129 08:26:59.327344 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:26:59 crc kubenswrapper[4943]: I1129 08:26:59.571250 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gfvtr_1311e160-08fd-4e7e-9599-031cdf056c62/ovsdb-server-init/0.log" Nov 29 08:26:59 crc kubenswrapper[4943]: I1129 08:26:59.708782 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gfvtr_1311e160-08fd-4e7e-9599-031cdf056c62/ovsdb-server-init/0.log" Nov 29 08:26:59 crc kubenswrapper[4943]: I1129 08:26:59.775478 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gfvtr_1311e160-08fd-4e7e-9599-031cdf056c62/ovs-vswitchd/0.log" Nov 29 08:26:59 crc kubenswrapper[4943]: I1129 08:26:59.922665 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gfvtr_1311e160-08fd-4e7e-9599-031cdf056c62/ovsdb-server/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.183907 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-m4thk_7bcc7c5b-722b-40fe-a07e-789d7abf95b2/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.270885 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_6a8c1a66-0365-4e4a-8c6c-6721a50e7468/nova-metadata-metadata/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.351870 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2c300f9b-996e-4271-992d-932fbbb5e64f/openstack-network-exporter/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.460179 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2c300f9b-996e-4271-992d-932fbbb5e64f/ovn-northd/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.586747 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_740a8879-98ab-4937-9d34-8c8563d3c852/openstack-network-exporter/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.688547 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_d77425c2-2838-4b1e-9847-c36e8228920e/cinder-volume/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.728233 4943 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_740a8879-98ab-4937-9d34-8c8563d3c852/ovsdbserver-nb/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.850205 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_d577552f-76e9-4b0f-9ab1-74aec5d11704/openstack-network-exporter/0.log" Nov 29 08:27:00 crc kubenswrapper[4943]: I1129 08:27:00.901739 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_d577552f-76e9-4b0f-9ab1-74aec5d11704/ovsdbserver-sb/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.165223 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-654bfbd6d-pmg6n_b3eec5fd-1275-4ea7-bd28-9e468ebd2e41/placement-api/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.247065 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-654bfbd6d-pmg6n_b3eec5fd-1275-4ea7-bd28-9e468ebd2e41/placement-log/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.299753 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ba80f24b-269a-42ff-b97e-94623499b030/setup-container/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.456008 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ba80f24b-269a-42ff-b97e-94623499b030/setup-container/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.525877 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ba80f24b-269a-42ff-b97e-94623499b030/rabbitmq/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.546388 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b2373f83-f75a-4f85-a8dd-133f36458591/setup-container/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.856695 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b2373f83-f75a-4f85-a8dd-133f36458591/setup-container/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.908895 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b2373f83-f75a-4f85-a8dd-133f36458591/rabbitmq/0.log" Nov 29 08:27:01 crc kubenswrapper[4943]: I1129 08:27:01.996451 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-stj5v_988e60e7-eeec-4335-8f34-8a0f7228dd59/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:27:02 crc kubenswrapper[4943]: I1129 08:27:02.171015 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-rcmtc_87f41bfa-9923-4bbe-b23c-229a8e7223af/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:27:02 crc kubenswrapper[4943]: I1129 08:27:02.263497 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-jcp4z_1f3aaa0c-0442-4417-8963-ad0640858384/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:27:02 crc kubenswrapper[4943]: I1129 08:27:02.452598 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-xjccs_d72cb85c-1d10-4baf-bdfb-ff2c9f927f43/ssh-known-hosts-edpm-deployment/0.log" Nov 29 08:27:02 crc kubenswrapper[4943]: I1129 08:27:02.698606 4943 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_tempest-tests-tempest_19a81291-2c8c-4099-a701-7f4049f4e890/tempest-tests-tempest-tests-runner/0.log" Nov 29 08:27:02 crc kubenswrapper[4943]: I1129 08:27:02.742067 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_346fe3c9-1f3c-487e-9e9d-b10d780a7d11/test-operator-logs-container/0.log" Nov 29 08:27:02 crc kubenswrapper[4943]: I1129 08:27:02.913189 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-q5mr2_d949007e-fcdf-4a59-a11c-bc2c210c1f58/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 08:27:03 crc kubenswrapper[4943]: I1129 08:27:03.517038 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_a2be496c-a331-4baf-b42b-453be5225812/memcached/0.log" Nov 29 08:27:14 crc kubenswrapper[4943]: I1129 08:27:14.328022 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:27:14 crc kubenswrapper[4943]: E1129 08:27:14.331802 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.254986 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-94jbz_c9815594-e69f-411a-9bf1-b0c064eb5180/kube-rbac-proxy/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.336205 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-94jbz_c9815594-e69f-411a-9bf1-b0c064eb5180/manager/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.449294 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-546795bfb5-9dvgz_5cfbb650-258a-4cf5-8ada-c721fa5aee9a/kube-rbac-proxy/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.504206 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-546795bfb5-9dvgz_5cfbb650-258a-4cf5-8ada-c721fa5aee9a/manager/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.665846 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-ncg7s_5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55/kube-rbac-proxy/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.673912 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-ncg7s_5b3a7a05-9e00-4f3d-be4d-9536f7ee2e55/manager/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.732720 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq_0d6d1cb4-1e63-4910-9dbc-166a3a8c331d/util/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.903700 4943 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq_0d6d1cb4-1e63-4910-9dbc-166a3a8c331d/pull/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.939448 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq_0d6d1cb4-1e63-4910-9dbc-166a3a8c331d/util/0.log" Nov 29 08:27:24 crc kubenswrapper[4943]: I1129 08:27:24.942705 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq_0d6d1cb4-1e63-4910-9dbc-166a3a8c331d/pull/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.080502 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq_0d6d1cb4-1e63-4910-9dbc-166a3a8c331d/pull/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.104887 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq_0d6d1cb4-1e63-4910-9dbc-166a3a8c331d/util/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.107502 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e4eb9d67f45569077a7459cbf78e395265fc25b8dac5136f151f67defd6fkzq_0d6d1cb4-1e63-4910-9dbc-166a3a8c331d/extract/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.273601 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-knfbt_868d0e27-4b0a-4cb0-a2a6-13d58e257c8f/kube-rbac-proxy/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.403060 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-rp427_d7ba2d7b-5840-4cc7-95d9-1953d8c0250b/kube-rbac-proxy/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.415167 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-knfbt_868d0e27-4b0a-4cb0-a2a6-13d58e257c8f/manager/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.465518 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-rp427_d7ba2d7b-5840-4cc7-95d9-1953d8c0250b/manager/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.578813 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-6hb4z_b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7/kube-rbac-proxy/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.602420 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-6hb4z_b8f2b43c-dd75-44aa-b6b5-365ab01ec0e7/manager/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.742754 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-kg4v7_0ec91d76-10f1-458f-b999-6212f13f5e18/kube-rbac-proxy/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.918656 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-zqc94_ac03d0ce-8e35-479d-9f8d-06e05955d2ce/kube-rbac-proxy/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 
08:27:25.937838 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-kg4v7_0ec91d76-10f1-458f-b999-6212f13f5e18/manager/0.log" Nov 29 08:27:25 crc kubenswrapper[4943]: I1129 08:27:25.999370 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-zqc94_ac03d0ce-8e35-479d-9f8d-06e05955d2ce/manager/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.084933 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-v6d5z_0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f/kube-rbac-proxy/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.182388 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-v6d5z_0934bcaa-1ff0-4fd1-ae90-bbcdb1fcf26f/manager/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.282363 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-wchm4_c35fc2d0-5f79-4edf-86ec-d6f1add18551/kube-rbac-proxy/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.400528 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-wchm4_c35fc2d0-5f79-4edf-86ec-d6f1add18551/manager/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.464596 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-tg7s9_b979c1f8-20ad-4694-98e5-674738e37f4c/kube-rbac-proxy/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.588501 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-jq6kn_74ea0cf0-4b53-4342-9ea4-c2e4db748104/kube-rbac-proxy/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.612230 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-tg7s9_b979c1f8-20ad-4694-98e5-674738e37f4c/manager/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.701365 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-jq6kn_74ea0cf0-4b53-4342-9ea4-c2e4db748104/manager/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.817694 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-hrvxb_f98a9e94-a4bf-4980-9ab0-efd202b4ee30/kube-rbac-proxy/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.917424 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-hrvxb_f98a9e94-a4bf-4980-9ab0-efd202b4ee30/manager/0.log" Nov 29 08:27:26 crc kubenswrapper[4943]: I1129 08:27:26.983674 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-v2kbm_980cdf9e-3763-4aca-a92a-1f4ca61c1501/kube-rbac-proxy/0.log" Nov 29 08:27:27 crc kubenswrapper[4943]: I1129 08:27:27.042402 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-v2kbm_980cdf9e-3763-4aca-a92a-1f4ca61c1501/manager/0.log" Nov 29 08:27:27 crc 
kubenswrapper[4943]: I1129 08:27:27.152098 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw_882924cc-0259-4c98-b40a-1c02eeadaa09/kube-rbac-proxy/0.log" Nov 29 08:27:27 crc kubenswrapper[4943]: I1129 08:27:27.172541 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4kjxrw_882924cc-0259-4c98-b40a-1c02eeadaa09/manager/0.log" Nov 29 08:27:27 crc kubenswrapper[4943]: I1129 08:27:27.531687 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-dqtsc_c683af0c-166d-4bac-9c73-5d9d13d32f81/registry-server/0.log" Nov 29 08:27:27 crc kubenswrapper[4943]: I1129 08:27:27.592003 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-56cc9fd6f6-q77pg_0215659c-bbb4-43cd-a403-ecaa380a2224/operator/0.log" Nov 29 08:27:27 crc kubenswrapper[4943]: I1129 08:27:27.724587 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-bwq9n_d4923779-fef3-444b-9276-5ca610c71fd4/kube-rbac-proxy/0.log" Nov 29 08:27:27 crc kubenswrapper[4943]: I1129 08:27:27.897794 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-bwq9n_d4923779-fef3-444b-9276-5ca610c71fd4/manager/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.083830 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-jgxw4_b60c121d-d0c2-4e2e-be92-6da802d74dd6/kube-rbac-proxy/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.170060 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-jgxw4_b60c121d-d0c2-4e2e-be92-6da802d74dd6/manager/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.223337 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-fvhlh_83a6e478-8cc1-4061-b06f-e0b9faf51ede/operator/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.326912 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:27:28 crc kubenswrapper[4943]: E1129 08:27:28.327340 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.352869 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-vbk9x_c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0/manager/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.411241 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-vbk9x_c8c7d6b1-e72c-4dae-ba96-1e87c2a54fa0/kube-rbac-proxy/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.477047 4943 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-ll829_66d0d275-3a0b-45a1-8b57-dc7ec4559888/kube-rbac-proxy/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.661395 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-ll829_66d0d275-3a0b-45a1-8b57-dc7ec4559888/manager/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.706333 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-jsgws_7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3/manager/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.709542 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-cb46f9b59-7249w_e75ee72e-f7fe-45a2-81f1-06ed23649f89/manager/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.714523 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-jsgws_7e1a6ed4-d7ec-4703-8acb-16ea3f99d8f3/kube-rbac-proxy/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.836601 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-f4nk6_4602188b-0cc4-4b1e-80e8-a2f40fd43da0/kube-rbac-proxy/0.log" Nov 29 08:27:28 crc kubenswrapper[4943]: I1129 08:27:28.906996 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-f4nk6_4602188b-0cc4-4b1e-80e8-a2f40fd43da0/manager/0.log" Nov 29 08:27:43 crc kubenswrapper[4943]: I1129 08:27:43.327641 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:27:43 crc kubenswrapper[4943]: E1129 08:27:43.328348 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:27:46 crc kubenswrapper[4943]: I1129 08:27:46.380843 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-7mcpf_655ce663-9a0e-4ce9-bdaf-e614234ac533/control-plane-machine-set-operator/0.log" Nov 29 08:27:46 crc kubenswrapper[4943]: I1129 08:27:46.518969 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-z2hf5_afc2af4e-e064-41fd-8fcf-e184be168a9a/kube-rbac-proxy/0.log" Nov 29 08:27:46 crc kubenswrapper[4943]: I1129 08:27:46.559511 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-z2hf5_afc2af4e-e064-41fd-8fcf-e184be168a9a/machine-api-operator/0.log" Nov 29 08:27:54 crc kubenswrapper[4943]: I1129 08:27:54.327986 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:27:54 crc kubenswrapper[4943]: E1129 08:27:54.331851 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:27:57 crc kubenswrapper[4943]: I1129 08:27:57.862836 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-x7697_77f68ba4-9118-4316-9cac-72735ebef023/cert-manager-controller/0.log" Nov 29 08:27:58 crc kubenswrapper[4943]: I1129 08:27:58.053691 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-wc5rn_ae8c2b0c-7dcb-4b7f-a8c0-340e42f58d0c/cert-manager-cainjector/0.log" Nov 29 08:27:58 crc kubenswrapper[4943]: I1129 08:27:58.116454 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-zfdnj_71f55aa3-b804-4307-96c1-fa7829f7d7d4/cert-manager-webhook/0.log" Nov 29 08:28:06 crc kubenswrapper[4943]: I1129 08:28:06.327533 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:28:06 crc kubenswrapper[4943]: E1129 08:28:06.328373 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:28:09 crc kubenswrapper[4943]: I1129 08:28:09.946779 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-fsz5q_4f1e28f0-85d9-4d51-900a-33ab52c3d087/nmstate-console-plugin/0.log" Nov 29 08:28:10 crc kubenswrapper[4943]: I1129 08:28:10.129594 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-kf6kc_32aeaef6-5f98-42e2-97c3-65c6494f256c/nmstate-handler/0.log" Nov 29 08:28:10 crc kubenswrapper[4943]: I1129 08:28:10.174882 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-nmd65_83d362f9-3c5d-4ee3-98fd-dea1eec92b79/nmstate-metrics/0.log" Nov 29 08:28:10 crc kubenswrapper[4943]: I1129 08:28:10.209938 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-nmd65_83d362f9-3c5d-4ee3-98fd-dea1eec92b79/kube-rbac-proxy/0.log" Nov 29 08:28:10 crc kubenswrapper[4943]: I1129 08:28:10.361348 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-8dprb_06fa1be5-8c71-42da-83ab-0e436d55137b/nmstate-operator/0.log" Nov 29 08:28:10 crc kubenswrapper[4943]: I1129 08:28:10.429012 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-7xj8x_b458e00f-1442-452e-9588-5c4b822e1bf8/nmstate-webhook/0.log" Nov 29 08:28:21 crc kubenswrapper[4943]: I1129 08:28:21.328493 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:28:21 crc kubenswrapper[4943]: E1129 08:28:21.330063 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:28:24 crc kubenswrapper[4943]: I1129 08:28:24.679650 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-qhsdq_c2c18b1d-2ac5-4321-a137-250557efd955/kube-rbac-proxy/0.log" Nov 29 08:28:24 crc kubenswrapper[4943]: I1129 08:28:24.745698 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-qhsdq_c2c18b1d-2ac5-4321-a137-250557efd955/controller/0.log" Nov 29 08:28:24 crc kubenswrapper[4943]: I1129 08:28:24.834819 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-frr-files/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.010105 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-reloader/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.010133 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-frr-files/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.043772 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-reloader/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.044341 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-metrics/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.231302 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-frr-files/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.235167 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-reloader/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.261628 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-metrics/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.276777 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-metrics/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.431188 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-frr-files/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.445646 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-metrics/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.451278 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/cp-reloader/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.456037 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/controller/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.635545 4943 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/kube-rbac-proxy/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.639634 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/frr-metrics/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.681902 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/kube-rbac-proxy-frr/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.918332 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/reloader/0.log" Nov 29 08:28:25 crc kubenswrapper[4943]: I1129 08:28:25.950954 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-mt5gz_65c5f0ea-c60a-4abd-b490-8bccde64fbc2/frr-k8s-webhook-server/0.log" Nov 29 08:28:26 crc kubenswrapper[4943]: I1129 08:28:26.139673 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-55bbfd58d6-j4fhq_07f76da9-c3b8-4c16-b4d8-d1eeb4e2a433/manager/0.log" Nov 29 08:28:26 crc kubenswrapper[4943]: I1129 08:28:26.356383 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7c4cd9bdff-jvhwq_408be66a-ef5a-472c-aa9f-31e20666f3ee/webhook-server/0.log" Nov 29 08:28:26 crc kubenswrapper[4943]: I1129 08:28:26.443429 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-7q88r_f60f2450-9ce3-4b8c-b268-6a0aab1cb075/kube-rbac-proxy/0.log" Nov 29 08:28:27 crc kubenswrapper[4943]: I1129 08:28:27.068817 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-7q88r_f60f2450-9ce3-4b8c-b268-6a0aab1cb075/speaker/0.log" Nov 29 08:28:27 crc kubenswrapper[4943]: I1129 08:28:27.382932 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dt7vh_4ad8fae5-ebf3-406c-b971-f15b1978e82c/frr/0.log" Nov 29 08:28:35 crc kubenswrapper[4943]: I1129 08:28:35.333919 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:28:35 crc kubenswrapper[4943]: E1129 08:28:35.334791 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:28:38 crc kubenswrapper[4943]: I1129 08:28:38.765144 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf_cfb30518-7399-46a6-8755-379d920dfa8a/util/0.log" Nov 29 08:28:38 crc kubenswrapper[4943]: I1129 08:28:38.926866 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf_cfb30518-7399-46a6-8755-379d920dfa8a/util/0.log" Nov 29 08:28:38 crc kubenswrapper[4943]: I1129 08:28:38.981763 4943 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf_cfb30518-7399-46a6-8755-379d920dfa8a/pull/0.log" Nov 29 08:28:38 crc kubenswrapper[4943]: I1129 08:28:38.985428 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf_cfb30518-7399-46a6-8755-379d920dfa8a/pull/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.191822 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf_cfb30518-7399-46a6-8755-379d920dfa8a/pull/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.206943 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf_cfb30518-7399-46a6-8755-379d920dfa8a/extract/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.231338 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f674mf_cfb30518-7399-46a6-8755-379d920dfa8a/util/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.366732 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm_f4141f7e-d480-47fb-9471-727df764b4ec/util/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.547617 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm_f4141f7e-d480-47fb-9471-727df764b4ec/util/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.558075 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm_f4141f7e-d480-47fb-9471-727df764b4ec/pull/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.573103 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm_f4141f7e-d480-47fb-9471-727df764b4ec/pull/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.769510 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm_f4141f7e-d480-47fb-9471-727df764b4ec/util/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.778936 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm_f4141f7e-d480-47fb-9471-727df764b4ec/extract/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.782733 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bdcrm_f4141f7e-d480-47fb-9471-727df764b4ec/pull/0.log" Nov 29 08:28:39 crc kubenswrapper[4943]: I1129 08:28:39.966994 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7s6bv_ad10da80-663e-4d61-b15c-76e7b3005c5a/extract-utilities/0.log" Nov 29 08:28:40 crc kubenswrapper[4943]: I1129 08:28:40.121834 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7s6bv_ad10da80-663e-4d61-b15c-76e7b3005c5a/extract-content/0.log" Nov 29 08:28:40 crc 
kubenswrapper[4943]: I1129 08:28:40.138154 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7s6bv_ad10da80-663e-4d61-b15c-76e7b3005c5a/extract-utilities/0.log" Nov 29 08:28:40 crc kubenswrapper[4943]: I1129 08:28:40.167123 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7s6bv_ad10da80-663e-4d61-b15c-76e7b3005c5a/extract-content/0.log" Nov 29 08:28:40 crc kubenswrapper[4943]: I1129 08:28:40.318622 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7s6bv_ad10da80-663e-4d61-b15c-76e7b3005c5a/extract-content/0.log" Nov 29 08:28:40 crc kubenswrapper[4943]: I1129 08:28:40.369469 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7s6bv_ad10da80-663e-4d61-b15c-76e7b3005c5a/extract-utilities/0.log" Nov 29 08:28:40 crc kubenswrapper[4943]: I1129 08:28:40.559370 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mm6h6_b1f699ce-49f8-415d-a493-f43d14ab9f53/extract-utilities/0.log" Nov 29 08:28:40 crc kubenswrapper[4943]: I1129 08:28:40.734886 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mm6h6_b1f699ce-49f8-415d-a493-f43d14ab9f53/extract-utilities/0.log" Nov 29 08:28:40 crc kubenswrapper[4943]: I1129 08:28:40.749232 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mm6h6_b1f699ce-49f8-415d-a493-f43d14ab9f53/extract-content/0.log" Nov 29 08:28:40 crc kubenswrapper[4943]: I1129 08:28:40.890255 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mm6h6_b1f699ce-49f8-415d-a493-f43d14ab9f53/extract-content/0.log" Nov 29 08:28:41 crc kubenswrapper[4943]: I1129 08:28:41.029383 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mm6h6_b1f699ce-49f8-415d-a493-f43d14ab9f53/extract-utilities/0.log" Nov 29 08:28:41 crc kubenswrapper[4943]: I1129 08:28:41.198441 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mm6h6_b1f699ce-49f8-415d-a493-f43d14ab9f53/extract-content/0.log" Nov 29 08:28:41 crc kubenswrapper[4943]: I1129 08:28:41.337844 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7s6bv_ad10da80-663e-4d61-b15c-76e7b3005c5a/registry-server/0.log" Nov 29 08:28:41 crc kubenswrapper[4943]: I1129 08:28:41.424754 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-mzndv_688a93bc-3061-40cb-a122-02b679922465/marketplace-operator/0.log" Nov 29 08:28:41 crc kubenswrapper[4943]: I1129 08:28:41.504018 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mm6h6_b1f699ce-49f8-415d-a493-f43d14ab9f53/registry-server/0.log" Nov 29 08:28:41 crc kubenswrapper[4943]: I1129 08:28:41.626159 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-s79pz_48de0189-2ca0-4efc-abfc-50d22fb3abe6/extract-utilities/0.log" Nov 29 08:28:41 crc kubenswrapper[4943]: I1129 08:28:41.817657 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-s79pz_48de0189-2ca0-4efc-abfc-50d22fb3abe6/extract-content/0.log" Nov 29 08:28:41 crc 
kubenswrapper[4943]: I1129 08:28:41.825825 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-s79pz_48de0189-2ca0-4efc-abfc-50d22fb3abe6/extract-content/0.log" Nov 29 08:28:41 crc kubenswrapper[4943]: I1129 08:28:41.829724 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-s79pz_48de0189-2ca0-4efc-abfc-50d22fb3abe6/extract-utilities/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.083779 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-s79pz_48de0189-2ca0-4efc-abfc-50d22fb3abe6/extract-content/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.097405 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-s79pz_48de0189-2ca0-4efc-abfc-50d22fb3abe6/extract-utilities/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.308344 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-s79pz_48de0189-2ca0-4efc-abfc-50d22fb3abe6/registry-server/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.332308 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-q8nkf_e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f/extract-utilities/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.521393 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-q8nkf_e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f/extract-content/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.541106 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-q8nkf_e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f/extract-utilities/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.549892 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-q8nkf_e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f/extract-content/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.705905 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-q8nkf_e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f/extract-content/0.log" Nov 29 08:28:42 crc kubenswrapper[4943]: I1129 08:28:42.710908 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-q8nkf_e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f/extract-utilities/0.log" Nov 29 08:28:43 crc kubenswrapper[4943]: I1129 08:28:43.553284 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-q8nkf_e6ac6c9f-2c2e-4a5d-b4c7-1c0a3486313f/registry-server/0.log" Nov 29 08:28:48 crc kubenswrapper[4943]: I1129 08:28:48.327924 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:28:48 crc kubenswrapper[4943]: E1129 08:28:48.328725 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:29:00 crc kubenswrapper[4943]: I1129 08:29:00.327371 4943 scope.go:117] 
"RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:29:00 crc kubenswrapper[4943]: E1129 08:29:00.328153 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:29:04 crc kubenswrapper[4943]: E1129 08:29:04.407287 4943 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.148:35396->38.102.83.148:44389: write tcp 38.102.83.148:35396->38.102.83.148:44389: write: broken pipe Nov 29 08:29:11 crc kubenswrapper[4943]: I1129 08:29:11.327536 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:29:11 crc kubenswrapper[4943]: E1129 08:29:11.328632 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.185745 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cxkpj"] Nov 29 08:29:22 crc kubenswrapper[4943]: E1129 08:29:22.186900 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b73935b-d04c-42ff-9921-38133b298f26" containerName="container-00" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.186916 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b73935b-d04c-42ff-9921-38133b298f26" containerName="container-00" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.187343 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b73935b-d04c-42ff-9921-38133b298f26" containerName="container-00" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.189089 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.221652 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cxkpj"] Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.287875 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-utilities\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.287968 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-catalog-content\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.288004 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t7wn\" (UniqueName: \"kubernetes.io/projected/0677495d-8c1d-4630-b389-aaeb53034710-kube-api-access-6t7wn\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.389427 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-utilities\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.389628 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-catalog-content\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.389772 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t7wn\" (UniqueName: \"kubernetes.io/projected/0677495d-8c1d-4630-b389-aaeb53034710-kube-api-access-6t7wn\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.390022 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-utilities\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.390149 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-catalog-content\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.419694 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6t7wn\" (UniqueName: \"kubernetes.io/projected/0677495d-8c1d-4630-b389-aaeb53034710-kube-api-access-6t7wn\") pod \"redhat-operators-cxkpj\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.510384 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:22 crc kubenswrapper[4943]: I1129 08:29:22.994869 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cxkpj"] Nov 29 08:29:23 crc kubenswrapper[4943]: E1129 08:29:23.376759 4943 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0677495d_8c1d_4630_b389_aaeb53034710.slice/crio-conmon-441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0677495d_8c1d_4630_b389_aaeb53034710.slice/crio-441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906.scope\": RecentStats: unable to find data in memory cache]" Nov 29 08:29:23 crc kubenswrapper[4943]: I1129 08:29:23.745750 4943 generic.go:334] "Generic (PLEG): container finished" podID="0677495d-8c1d-4630-b389-aaeb53034710" containerID="441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906" exitCode=0 Nov 29 08:29:23 crc kubenswrapper[4943]: I1129 08:29:23.745791 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxkpj" event={"ID":"0677495d-8c1d-4630-b389-aaeb53034710","Type":"ContainerDied","Data":"441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906"} Nov 29 08:29:23 crc kubenswrapper[4943]: I1129 08:29:23.745814 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxkpj" event={"ID":"0677495d-8c1d-4630-b389-aaeb53034710","Type":"ContainerStarted","Data":"02e248b77f65209a098ea4269fcdf19b3c262dc3fe4b434e0b680eba18f87423"} Nov 29 08:29:24 crc kubenswrapper[4943]: I1129 08:29:24.756624 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxkpj" event={"ID":"0677495d-8c1d-4630-b389-aaeb53034710","Type":"ContainerStarted","Data":"abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177"} Nov 29 08:29:26 crc kubenswrapper[4943]: I1129 08:29:26.327327 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:29:26 crc kubenswrapper[4943]: E1129 08:29:26.327895 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:29:30 crc kubenswrapper[4943]: I1129 08:29:30.810082 4943 generic.go:334] "Generic (PLEG): container finished" podID="0677495d-8c1d-4630-b389-aaeb53034710" containerID="abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177" exitCode=0 Nov 29 08:29:30 crc kubenswrapper[4943]: I1129 08:29:30.810159 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-cxkpj" event={"ID":"0677495d-8c1d-4630-b389-aaeb53034710","Type":"ContainerDied","Data":"abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177"} Nov 29 08:29:31 crc kubenswrapper[4943]: I1129 08:29:31.823165 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxkpj" event={"ID":"0677495d-8c1d-4630-b389-aaeb53034710","Type":"ContainerStarted","Data":"e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15"} Nov 29 08:29:31 crc kubenswrapper[4943]: I1129 08:29:31.845550 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cxkpj" podStartSLOduration=2.098442809 podStartE2EDuration="9.845530068s" podCreationTimestamp="2025-11-29 08:29:22 +0000 UTC" firstStartedPulling="2025-11-29 08:29:23.747702293 +0000 UTC m=+6938.677791046" lastFinishedPulling="2025-11-29 08:29:31.494789552 +0000 UTC m=+6946.424878305" observedRunningTime="2025-11-29 08:29:31.842397953 +0000 UTC m=+6946.772486716" watchObservedRunningTime="2025-11-29 08:29:31.845530068 +0000 UTC m=+6946.775618831" Nov 29 08:29:32 crc kubenswrapper[4943]: I1129 08:29:32.511442 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:32 crc kubenswrapper[4943]: I1129 08:29:32.511488 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:33 crc kubenswrapper[4943]: I1129 08:29:33.562118 4943 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cxkpj" podUID="0677495d-8c1d-4630-b389-aaeb53034710" containerName="registry-server" probeResult="failure" output=< Nov 29 08:29:33 crc kubenswrapper[4943]: timeout: failed to connect service ":50051" within 1s Nov 29 08:29:33 crc kubenswrapper[4943]: > Nov 29 08:29:40 crc kubenswrapper[4943]: I1129 08:29:40.327906 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:29:40 crc kubenswrapper[4943]: E1129 08:29:40.328813 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.529326 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rvdnq"] Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.534253 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.542536 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rvdnq"] Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.675064 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-catalog-content\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.675171 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vqgh\" (UniqueName: \"kubernetes.io/projected/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-kube-api-access-6vqgh\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.675331 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-utilities\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.776924 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-catalog-content\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.777357 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vqgh\" (UniqueName: \"kubernetes.io/projected/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-kube-api-access-6vqgh\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.777381 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-catalog-content\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.777520 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-utilities\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.777855 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-utilities\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.800658 4943 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6vqgh\" (UniqueName: \"kubernetes.io/projected/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-kube-api-access-6vqgh\") pod \"redhat-marketplace-rvdnq\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:41 crc kubenswrapper[4943]: I1129 08:29:41.866799 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:42 crc kubenswrapper[4943]: I1129 08:29:42.381071 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rvdnq"] Nov 29 08:29:42 crc kubenswrapper[4943]: I1129 08:29:42.579446 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:42 crc kubenswrapper[4943]: I1129 08:29:42.629430 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:42 crc kubenswrapper[4943]: I1129 08:29:42.942601 4943 generic.go:334] "Generic (PLEG): container finished" podID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerID="ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485" exitCode=0 Nov 29 08:29:42 crc kubenswrapper[4943]: I1129 08:29:42.942720 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rvdnq" event={"ID":"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd","Type":"ContainerDied","Data":"ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485"} Nov 29 08:29:42 crc kubenswrapper[4943]: I1129 08:29:42.942745 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rvdnq" event={"ID":"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd","Type":"ContainerStarted","Data":"6a5331ca6b8035be17a3016604517367a087a7101f147c0180e02ff5388cf2ad"} Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.108420 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cxkpj"] Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.109140 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cxkpj" podUID="0677495d-8c1d-4630-b389-aaeb53034710" containerName="registry-server" containerID="cri-o://e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15" gracePeriod=2 Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.592440 4943 util.go:48] "No ready sandbox for pod can be found. 
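
[note] The earlier startup-probe failure for registry-server ('timeout: failed to connect service ":50051" within 1s') resolves here: by 08:29:42 the same probe reports started and the readiness probe reports ready. The probe output format is consistent with a gRPC health check against the catalog port; the sketch below only approximates the reachability half of it with a 1-second TCP connect, and is not the actual probe binary:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // 1s connect check against the registry-server port (sketch only;
        // the real probe also exercises the gRPC health service).
        conn, err := net.DialTimeout("tcp", "localhost:50051", 1*time.Second)
        if err != nil {
            fmt.Printf("timeout: failed to connect service %q within 1s (%v)\n", ":50051", err)
            return
        }
        conn.Close()
        fmt.Println("service reachable")
    }
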
Need to start a new one" pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.731923 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6t7wn\" (UniqueName: \"kubernetes.io/projected/0677495d-8c1d-4630-b389-aaeb53034710-kube-api-access-6t7wn\") pod \"0677495d-8c1d-4630-b389-aaeb53034710\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.732006 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-catalog-content\") pod \"0677495d-8c1d-4630-b389-aaeb53034710\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.732116 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-utilities\") pod \"0677495d-8c1d-4630-b389-aaeb53034710\" (UID: \"0677495d-8c1d-4630-b389-aaeb53034710\") " Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.733226 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-utilities" (OuterVolumeSpecName: "utilities") pod "0677495d-8c1d-4630-b389-aaeb53034710" (UID: "0677495d-8c1d-4630-b389-aaeb53034710"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.737929 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0677495d-8c1d-4630-b389-aaeb53034710-kube-api-access-6t7wn" (OuterVolumeSpecName: "kube-api-access-6t7wn") pod "0677495d-8c1d-4630-b389-aaeb53034710" (UID: "0677495d-8c1d-4630-b389-aaeb53034710"). InnerVolumeSpecName "kube-api-access-6t7wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.834134 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0677495d-8c1d-4630-b389-aaeb53034710" (UID: "0677495d-8c1d-4630-b389-aaeb53034710"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.835369 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6t7wn\" (UniqueName: \"kubernetes.io/projected/0677495d-8c1d-4630-b389-aaeb53034710-kube-api-access-6t7wn\") on node \"crc\" DevicePath \"\"" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.835435 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.835449 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0677495d-8c1d-4630-b389-aaeb53034710-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.961071 4943 generic.go:334] "Generic (PLEG): container finished" podID="0677495d-8c1d-4630-b389-aaeb53034710" containerID="e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15" exitCode=0 Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.961126 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cxkpj" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.961138 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxkpj" event={"ID":"0677495d-8c1d-4630-b389-aaeb53034710","Type":"ContainerDied","Data":"e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15"} Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.961849 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxkpj" event={"ID":"0677495d-8c1d-4630-b389-aaeb53034710","Type":"ContainerDied","Data":"02e248b77f65209a098ea4269fcdf19b3c262dc3fe4b434e0b680eba18f87423"} Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.961874 4943 scope.go:117] "RemoveContainer" containerID="e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15" Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.964556 4943 generic.go:334] "Generic (PLEG): container finished" podID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerID="55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d" exitCode=0 Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.964605 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rvdnq" event={"ID":"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd","Type":"ContainerDied","Data":"55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d"} Nov 29 08:29:44 crc kubenswrapper[4943]: I1129 08:29:44.984825 4943 scope.go:117] "RemoveContainer" containerID="abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.015339 4943 scope.go:117] "RemoveContainer" containerID="441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.018916 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cxkpj"] Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.027360 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cxkpj"] Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.056055 4943 scope.go:117] "RemoveContainer" 
containerID="e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15" Nov 29 08:29:45 crc kubenswrapper[4943]: E1129 08:29:45.056604 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15\": container with ID starting with e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15 not found: ID does not exist" containerID="e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.057665 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15"} err="failed to get container status \"e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15\": rpc error: code = NotFound desc = could not find container \"e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15\": container with ID starting with e55e0d248c8c2642b54f0632f0f7cd705846b278701e3297acaba2e11bda5f15 not found: ID does not exist" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.057773 4943 scope.go:117] "RemoveContainer" containerID="abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177" Nov 29 08:29:45 crc kubenswrapper[4943]: E1129 08:29:45.058279 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177\": container with ID starting with abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177 not found: ID does not exist" containerID="abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.058322 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177"} err="failed to get container status \"abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177\": rpc error: code = NotFound desc = could not find container \"abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177\": container with ID starting with abd67c00cdb3534931c13392b9ac71d6adfdf62780cd470e5f0d51a2035a8177 not found: ID does not exist" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.058349 4943 scope.go:117] "RemoveContainer" containerID="441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906" Nov 29 08:29:45 crc kubenswrapper[4943]: E1129 08:29:45.058688 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906\": container with ID starting with 441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906 not found: ID does not exist" containerID="441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.058714 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906"} err="failed to get container status \"441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906\": rpc error: code = NotFound desc = could not find container \"441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906\": container with ID starting with 
441ee90b8af493b011edcfde8841e94d53a3a5dbffc36967d88462394e940906 not found: ID does not exist" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.348677 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0677495d-8c1d-4630-b389-aaeb53034710" path="/var/lib/kubelet/pods/0677495d-8c1d-4630-b389-aaeb53034710/volumes" Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.974546 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rvdnq" event={"ID":"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd","Type":"ContainerStarted","Data":"124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332"} Nov 29 08:29:45 crc kubenswrapper[4943]: I1129 08:29:45.993343 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rvdnq" podStartSLOduration=2.516044665 podStartE2EDuration="4.993325379s" podCreationTimestamp="2025-11-29 08:29:41 +0000 UTC" firstStartedPulling="2025-11-29 08:29:42.945459466 +0000 UTC m=+6957.875548219" lastFinishedPulling="2025-11-29 08:29:45.42274018 +0000 UTC m=+6960.352828933" observedRunningTime="2025-11-29 08:29:45.991126376 +0000 UTC m=+6960.921215139" watchObservedRunningTime="2025-11-29 08:29:45.993325379 +0000 UTC m=+6960.923414132" Nov 29 08:29:51 crc kubenswrapper[4943]: I1129 08:29:51.867262 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:51 crc kubenswrapper[4943]: I1129 08:29:51.867858 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:51 crc kubenswrapper[4943]: I1129 08:29:51.911233 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:52 crc kubenswrapper[4943]: I1129 08:29:52.071298 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:52 crc kubenswrapper[4943]: I1129 08:29:52.328190 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:29:52 crc kubenswrapper[4943]: E1129 08:29:52.328526 4943 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f4gf7_openshift-machine-config-operator(9452a4f7-8768-4190-b544-50f80bc5ebf6)\"" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.115945 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rvdnq"] Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.117281 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rvdnq" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerName="registry-server" containerID="cri-o://124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332" gracePeriod=2 Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.547011 4943 util.go:48] "No ready sandbox for pod can be found. 
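
[note] The "ContainerStatus from runtime service failed ... NotFound" errors above are a benign race: the kubelet asks the runtime for the status of containers it has just deleted, and CRI-O answers with gRPC code NotFound, which the kubelet then logs and moves past. A sketch of classifying such an error (the error value here is fabricated for illustration; the real one carries the full 64-character container ID):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func main() {
        // Fabricated stand-in for the CRI runtime's reply.
        err := status.Error(codes.NotFound, "could not find container")

        if status.Code(err) == codes.NotFound {
            // Already gone: treat the delete as complete rather than failed.
            fmt.Println("container already removed:", err)
        }
    }
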
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.644371 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vqgh\" (UniqueName: \"kubernetes.io/projected/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-kube-api-access-6vqgh\") pod \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.644450 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-catalog-content\") pod \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.644603 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-utilities\") pod \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\" (UID: \"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd\") " Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.645613 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-utilities" (OuterVolumeSpecName: "utilities") pod "79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" (UID: "79f89af8-4d15-4ef7-bea3-12d6a8ca89bd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.654332 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-kube-api-access-6vqgh" (OuterVolumeSpecName: "kube-api-access-6vqgh") pod "79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" (UID: "79f89af8-4d15-4ef7-bea3-12d6a8ca89bd"). InnerVolumeSpecName "kube-api-access-6vqgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.666280 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" (UID: "79f89af8-4d15-4ef7-bea3-12d6a8ca89bd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.746544 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.746939 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vqgh\" (UniqueName: \"kubernetes.io/projected/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-kube-api-access-6vqgh\") on node \"crc\" DevicePath \"\"" Nov 29 08:29:55 crc kubenswrapper[4943]: I1129 08:29:55.746956 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.060901 4943 generic.go:334] "Generic (PLEG): container finished" podID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerID="124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332" exitCode=0 Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.060939 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rvdnq" event={"ID":"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd","Type":"ContainerDied","Data":"124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332"} Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.060964 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rvdnq" event={"ID":"79f89af8-4d15-4ef7-bea3-12d6a8ca89bd","Type":"ContainerDied","Data":"6a5331ca6b8035be17a3016604517367a087a7101f147c0180e02ff5388cf2ad"} Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.060979 4943 scope.go:117] "RemoveContainer" containerID="124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.061079 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rvdnq" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.097247 4943 scope.go:117] "RemoveContainer" containerID="55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.101084 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rvdnq"] Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.109382 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rvdnq"] Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.125063 4943 scope.go:117] "RemoveContainer" containerID="ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.166478 4943 scope.go:117] "RemoveContainer" containerID="124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332" Nov 29 08:29:56 crc kubenswrapper[4943]: E1129 08:29:56.166918 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332\": container with ID starting with 124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332 not found: ID does not exist" containerID="124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.166948 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332"} err="failed to get container status \"124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332\": rpc error: code = NotFound desc = could not find container \"124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332\": container with ID starting with 124e6ff9c40752e6ec9da69d67943f48f3e420f29e773538113c79b0b15ca332 not found: ID does not exist" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.166968 4943 scope.go:117] "RemoveContainer" containerID="55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d" Nov 29 08:29:56 crc kubenswrapper[4943]: E1129 08:29:56.167617 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d\": container with ID starting with 55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d not found: ID does not exist" containerID="55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.167649 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d"} err="failed to get container status \"55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d\": rpc error: code = NotFound desc = could not find container \"55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d\": container with ID starting with 55a81a727037d6ecce504b6cbd08bc3b79cb36b01905679c50563bed2defde3d not found: ID does not exist" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.167666 4943 scope.go:117] "RemoveContainer" containerID="ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485" Nov 29 08:29:56 crc kubenswrapper[4943]: E1129 08:29:56.168105 4943 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485\": container with ID starting with ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485 not found: ID does not exist" containerID="ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485" Nov 29 08:29:56 crc kubenswrapper[4943]: I1129 08:29:56.168137 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485"} err="failed to get container status \"ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485\": rpc error: code = NotFound desc = could not find container \"ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485\": container with ID starting with ab36c654126b1aa5115139de85b0064d0b5ee5118c13be8bfcef61fdabbdb485 not found: ID does not exist" Nov 29 08:29:57 crc kubenswrapper[4943]: I1129 08:29:57.339943 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" path="/var/lib/kubelet/pods/79f89af8-4d15-4ef7-bea3-12d6a8ca89bd/volumes" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.176383 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6"] Nov 29 08:30:00 crc kubenswrapper[4943]: E1129 08:30:00.177204 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0677495d-8c1d-4630-b389-aaeb53034710" containerName="registry-server" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.177220 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0677495d-8c1d-4630-b389-aaeb53034710" containerName="registry-server" Nov 29 08:30:00 crc kubenswrapper[4943]: E1129 08:30:00.177236 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerName="extract-utilities" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.177242 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerName="extract-utilities" Nov 29 08:30:00 crc kubenswrapper[4943]: E1129 08:30:00.177268 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerName="registry-server" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.177278 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerName="registry-server" Nov 29 08:30:00 crc kubenswrapper[4943]: E1129 08:30:00.177340 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0677495d-8c1d-4630-b389-aaeb53034710" containerName="extract-utilities" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.177350 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0677495d-8c1d-4630-b389-aaeb53034710" containerName="extract-utilities" Nov 29 08:30:00 crc kubenswrapper[4943]: E1129 08:30:00.177363 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerName="extract-content" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.177370 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerName="extract-content" Nov 29 08:30:00 crc kubenswrapper[4943]: E1129 08:30:00.177390 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0677495d-8c1d-4630-b389-aaeb53034710" 
containerName="extract-content" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.177396 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="0677495d-8c1d-4630-b389-aaeb53034710" containerName="extract-content" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.177710 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="0677495d-8c1d-4630-b389-aaeb53034710" containerName="registry-server" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.177725 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="79f89af8-4d15-4ef7-bea3-12d6a8ca89bd" containerName="registry-server" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.180350 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.184113 4943 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.184369 4943 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.198123 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6"] Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.343907 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-config-volume\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.344061 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v5h7\" (UniqueName: \"kubernetes.io/projected/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-kube-api-access-6v5h7\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.344203 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-secret-volume\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.448003 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-secret-volume\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.448997 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-config-volume\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.449275 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v5h7\" (UniqueName: \"kubernetes.io/projected/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-kube-api-access-6v5h7\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.449935 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-config-volume\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.456096 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-secret-volume\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.470168 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v5h7\" (UniqueName: \"kubernetes.io/projected/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-kube-api-access-6v5h7\") pod \"collect-profiles-29406750-cqqg6\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.511026 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:00 crc kubenswrapper[4943]: I1129 08:30:00.976336 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6"] Nov 29 08:30:01 crc kubenswrapper[4943]: I1129 08:30:01.111651 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" event={"ID":"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170","Type":"ContainerStarted","Data":"d87af5b7d0d55b1dbcb0d87a5dea64ed3293f10b490b7f6a470d0ec344c0fcaf"} Nov 29 08:30:02 crc kubenswrapper[4943]: I1129 08:30:02.129132 4943 generic.go:334] "Generic (PLEG): container finished" podID="ec2dc4a3-b6a3-4ea1-9257-a6e40a841170" containerID="f67db7261fda094910acf410cfe33773036e67456fc278889256394d1a124a4a" exitCode=0 Nov 29 08:30:02 crc kubenswrapper[4943]: I1129 08:30:02.129420 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" event={"ID":"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170","Type":"ContainerDied","Data":"f67db7261fda094910acf410cfe33773036e67456fc278889256394d1a124a4a"} Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.476506 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.606107 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v5h7\" (UniqueName: \"kubernetes.io/projected/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-kube-api-access-6v5h7\") pod \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.606237 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-secret-volume\") pod \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.606463 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-config-volume\") pod \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\" (UID: \"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170\") " Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.607760 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-config-volume" (OuterVolumeSpecName: "config-volume") pod "ec2dc4a3-b6a3-4ea1-9257-a6e40a841170" (UID: "ec2dc4a3-b6a3-4ea1-9257-a6e40a841170"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.612961 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ec2dc4a3-b6a3-4ea1-9257-a6e40a841170" (UID: "ec2dc4a3-b6a3-4ea1-9257-a6e40a841170"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.614606 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-kube-api-access-6v5h7" (OuterVolumeSpecName: "kube-api-access-6v5h7") pod "ec2dc4a3-b6a3-4ea1-9257-a6e40a841170" (UID: "ec2dc4a3-b6a3-4ea1-9257-a6e40a841170"). InnerVolumeSpecName "kube-api-access-6v5h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.708371 4943 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.708414 4943 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 08:30:03 crc kubenswrapper[4943]: I1129 08:30:03.708428 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v5h7\" (UniqueName: \"kubernetes.io/projected/ec2dc4a3-b6a3-4ea1-9257-a6e40a841170-kube-api-access-6v5h7\") on node \"crc\" DevicePath \"\"" Nov 29 08:30:04 crc kubenswrapper[4943]: I1129 08:30:04.148354 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" event={"ID":"ec2dc4a3-b6a3-4ea1-9257-a6e40a841170","Type":"ContainerDied","Data":"d87af5b7d0d55b1dbcb0d87a5dea64ed3293f10b490b7f6a470d0ec344c0fcaf"} Nov 29 08:30:04 crc kubenswrapper[4943]: I1129 08:30:04.148705 4943 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d87af5b7d0d55b1dbcb0d87a5dea64ed3293f10b490b7f6a470d0ec344c0fcaf" Nov 29 08:30:04 crc kubenswrapper[4943]: I1129 08:30:04.148419 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406750-cqqg6" Nov 29 08:30:04 crc kubenswrapper[4943]: I1129 08:30:04.549351 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"] Nov 29 08:30:04 crc kubenswrapper[4943]: I1129 08:30:04.559391 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406705-t49f4"] Nov 29 08:30:05 crc kubenswrapper[4943]: I1129 08:30:05.342810 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c0ac0c8-7a37-405b-b60a-6c70680b7972" path="/var/lib/kubelet/pods/6c0ac0c8-7a37-405b-b60a-6c70680b7972/volumes" Nov 29 08:30:06 crc kubenswrapper[4943]: I1129 08:30:06.327142 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:30:07 crc kubenswrapper[4943]: I1129 08:30:07.172297 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"9d5d71c8e9010c3bed77d184f510d4db62b7c14350aac76759cbc2a0fb01307a"} Nov 29 08:30:47 crc kubenswrapper[4943]: I1129 08:30:47.520749 4943 generic.go:334] "Generic (PLEG): container finished" podID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerID="5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5" exitCode=0 Nov 29 08:30:47 crc kubenswrapper[4943]: I1129 08:30:47.520859 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-q4cgs/must-gather-khqct" event={"ID":"60e4522f-8e82-468d-8541-e0ae3d31456a","Type":"ContainerDied","Data":"5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5"} Nov 29 08:30:47 crc kubenswrapper[4943]: I1129 08:30:47.522339 4943 scope.go:117] "RemoveContainer" 
containerID="5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5" Nov 29 08:30:47 crc kubenswrapper[4943]: I1129 08:30:47.611850 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-q4cgs_must-gather-khqct_60e4522f-8e82-468d-8541-e0ae3d31456a/gather/0.log" Nov 29 08:30:56 crc kubenswrapper[4943]: I1129 08:30:56.709543 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-q4cgs/must-gather-khqct"] Nov 29 08:30:56 crc kubenswrapper[4943]: I1129 08:30:56.710448 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-q4cgs/must-gather-khqct" podUID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerName="copy" containerID="cri-o://c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb" gracePeriod=2 Nov 29 08:30:56 crc kubenswrapper[4943]: I1129 08:30:56.719596 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-q4cgs/must-gather-khqct"] Nov 29 08:30:56 crc kubenswrapper[4943]: I1129 08:30:56.788385 4943 scope.go:117] "RemoveContainer" containerID="15ee8554059e5d0a40d26906efa29c4151bf9892038e51080c83f1a8f8a779c6" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.184849 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-q4cgs_must-gather-khqct_60e4522f-8e82-468d-8541-e0ae3d31456a/copy/0.log" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.185639 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.312425 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fwdd\" (UniqueName: \"kubernetes.io/projected/60e4522f-8e82-468d-8541-e0ae3d31456a-kube-api-access-4fwdd\") pod \"60e4522f-8e82-468d-8541-e0ae3d31456a\" (UID: \"60e4522f-8e82-468d-8541-e0ae3d31456a\") " Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.312702 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/60e4522f-8e82-468d-8541-e0ae3d31456a-must-gather-output\") pod \"60e4522f-8e82-468d-8541-e0ae3d31456a\" (UID: \"60e4522f-8e82-468d-8541-e0ae3d31456a\") " Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.326935 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60e4522f-8e82-468d-8541-e0ae3d31456a-kube-api-access-4fwdd" (OuterVolumeSpecName: "kube-api-access-4fwdd") pod "60e4522f-8e82-468d-8541-e0ae3d31456a" (UID: "60e4522f-8e82-468d-8541-e0ae3d31456a"). InnerVolumeSpecName "kube-api-access-4fwdd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.414389 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fwdd\" (UniqueName: \"kubernetes.io/projected/60e4522f-8e82-468d-8541-e0ae3d31456a-kube-api-access-4fwdd\") on node \"crc\" DevicePath \"\"" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.492680 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60e4522f-8e82-468d-8541-e0ae3d31456a-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "60e4522f-8e82-468d-8541-e0ae3d31456a" (UID: "60e4522f-8e82-468d-8541-e0ae3d31456a"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.516729 4943 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/60e4522f-8e82-468d-8541-e0ae3d31456a-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.608800 4943 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-q4cgs_must-gather-khqct_60e4522f-8e82-468d-8541-e0ae3d31456a/copy/0.log" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.609532 4943 generic.go:334] "Generic (PLEG): container finished" podID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerID="c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb" exitCode=143 Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.609616 4943 scope.go:117] "RemoveContainer" containerID="c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.609684 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-q4cgs/must-gather-khqct" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.634615 4943 scope.go:117] "RemoveContainer" containerID="5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.705653 4943 scope.go:117] "RemoveContainer" containerID="c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb" Nov 29 08:30:57 crc kubenswrapper[4943]: E1129 08:30:57.706208 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb\": container with ID starting with c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb not found: ID does not exist" containerID="c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.706273 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb"} err="failed to get container status \"c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb\": rpc error: code = NotFound desc = could not find container \"c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb\": container with ID starting with c41fb09c51a4759af34de71bb75de0cb789d4e09107213252e8aff59315417bb not found: ID does not exist" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.706310 4943 scope.go:117] "RemoveContainer" containerID="5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5" Nov 29 08:30:57 crc kubenswrapper[4943]: E1129 08:30:57.706778 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5\": container with ID starting with 5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5 not found: ID does not exist" containerID="5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5" Nov 29 08:30:57 crc kubenswrapper[4943]: I1129 08:30:57.706808 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5"} err="failed to get container status 
\"5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5\": rpc error: code = NotFound desc = could not find container \"5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5\": container with ID starting with 5901b012c512812becf3c88ee5add2d6d46047d69be45abeaa2be84af613c6d5 not found: ID does not exist" Nov 29 08:30:59 crc kubenswrapper[4943]: I1129 08:30:59.342829 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60e4522f-8e82-468d-8541-e0ae3d31456a" path="/var/lib/kubelet/pods/60e4522f-8e82-468d-8541-e0ae3d31456a/volumes" Nov 29 08:31:56 crc kubenswrapper[4943]: I1129 08:31:56.870240 4943 scope.go:117] "RemoveContainer" containerID="aa727089b0f93873e04dbd69e064e0e2552a53c1f1797c47ee7dafa29a6b4f60" Nov 29 08:32:32 crc kubenswrapper[4943]: I1129 08:32:32.613175 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:32:32 crc kubenswrapper[4943]: I1129 08:32:32.614371 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.489390 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rj5w4"] Nov 29 08:32:49 crc kubenswrapper[4943]: E1129 08:32:49.490374 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerName="copy" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.490389 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerName="copy" Nov 29 08:32:49 crc kubenswrapper[4943]: E1129 08:32:49.490407 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerName="gather" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.490414 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerName="gather" Nov 29 08:32:49 crc kubenswrapper[4943]: E1129 08:32:49.490433 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec2dc4a3-b6a3-4ea1-9257-a6e40a841170" containerName="collect-profiles" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.490441 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec2dc4a3-b6a3-4ea1-9257-a6e40a841170" containerName="collect-profiles" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.490720 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerName="gather" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.490736 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec2dc4a3-b6a3-4ea1-9257-a6e40a841170" containerName="collect-profiles" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.490757 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="60e4522f-8e82-468d-8541-e0ae3d31456a" containerName="copy" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.492289 4943 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.518205 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rj5w4"] Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.663701 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-catalog-content\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.663794 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-utilities\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.663823 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89wkm\" (UniqueName: \"kubernetes.io/projected/3b51df03-f27e-46f5-90c9-e0d163104a49-kube-api-access-89wkm\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.766125 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-catalog-content\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.766499 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-utilities\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.766532 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89wkm\" (UniqueName: \"kubernetes.io/projected/3b51df03-f27e-46f5-90c9-e0d163104a49-kube-api-access-89wkm\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.766760 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-catalog-content\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.767008 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-utilities\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.792649 4943 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-89wkm\" (UniqueName: \"kubernetes.io/projected/3b51df03-f27e-46f5-90c9-e0d163104a49-kube-api-access-89wkm\") pod \"community-operators-rj5w4\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:49 crc kubenswrapper[4943]: I1129 08:32:49.823279 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:50 crc kubenswrapper[4943]: I1129 08:32:50.364599 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rj5w4"] Nov 29 08:32:50 crc kubenswrapper[4943]: I1129 08:32:50.604623 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rj5w4" event={"ID":"3b51df03-f27e-46f5-90c9-e0d163104a49","Type":"ContainerDied","Data":"540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0"} Nov 29 08:32:50 crc kubenswrapper[4943]: I1129 08:32:50.604749 4943 generic.go:334] "Generic (PLEG): container finished" podID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerID="540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0" exitCode=0 Nov 29 08:32:50 crc kubenswrapper[4943]: I1129 08:32:50.605271 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rj5w4" event={"ID":"3b51df03-f27e-46f5-90c9-e0d163104a49","Type":"ContainerStarted","Data":"2a629587049ca9db1f746a5b2b4961a6e29326f2076c745bb55adf1a8dc9846f"} Nov 29 08:32:50 crc kubenswrapper[4943]: I1129 08:32:50.607010 4943 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 08:32:51 crc kubenswrapper[4943]: I1129 08:32:51.617728 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rj5w4" event={"ID":"3b51df03-f27e-46f5-90c9-e0d163104a49","Type":"ContainerStarted","Data":"4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148"} Nov 29 08:32:52 crc kubenswrapper[4943]: I1129 08:32:52.629538 4943 generic.go:334] "Generic (PLEG): container finished" podID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerID="4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148" exitCode=0 Nov 29 08:32:52 crc kubenswrapper[4943]: I1129 08:32:52.629599 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rj5w4" event={"ID":"3b51df03-f27e-46f5-90c9-e0d163104a49","Type":"ContainerDied","Data":"4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148"} Nov 29 08:32:53 crc kubenswrapper[4943]: I1129 08:32:53.644068 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rj5w4" event={"ID":"3b51df03-f27e-46f5-90c9-e0d163104a49","Type":"ContainerStarted","Data":"b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca"} Nov 29 08:32:53 crc kubenswrapper[4943]: I1129 08:32:53.666254 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rj5w4" podStartSLOduration=1.969183368 podStartE2EDuration="4.666229936s" podCreationTimestamp="2025-11-29 08:32:49 +0000 UTC" firstStartedPulling="2025-11-29 08:32:50.60644565 +0000 UTC m=+7145.536534403" lastFinishedPulling="2025-11-29 08:32:53.303492218 +0000 UTC m=+7148.233580971" observedRunningTime="2025-11-29 08:32:53.659955367 +0000 UTC m=+7148.590044140" watchObservedRunningTime="2025-11-29 
08:32:53.666229936 +0000 UTC m=+7148.596318699" Nov 29 08:32:56 crc kubenswrapper[4943]: I1129 08:32:56.950935 4943 scope.go:117] "RemoveContainer" containerID="e33d1412a89893d73b888ed392252de7e7556b6ff8d9820bc4709033d750275a" Nov 29 08:32:59 crc kubenswrapper[4943]: I1129 08:32:59.824762 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:59 crc kubenswrapper[4943]: I1129 08:32:59.825439 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:32:59 crc kubenswrapper[4943]: I1129 08:32:59.878429 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:33:00 crc kubenswrapper[4943]: I1129 08:33:00.757157 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:33:00 crc kubenswrapper[4943]: I1129 08:33:00.804470 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rj5w4"] Nov 29 08:33:02 crc kubenswrapper[4943]: I1129 08:33:02.613913 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:33:02 crc kubenswrapper[4943]: I1129 08:33:02.614190 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:33:02 crc kubenswrapper[4943]: I1129 08:33:02.725375 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rj5w4" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerName="registry-server" containerID="cri-o://b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca" gracePeriod=2 Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.691583 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.746181 4943 generic.go:334] "Generic (PLEG): container finished" podID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerID="b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca" exitCode=0 Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.746232 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rj5w4" event={"ID":"3b51df03-f27e-46f5-90c9-e0d163104a49","Type":"ContainerDied","Data":"b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca"} Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.746267 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rj5w4" event={"ID":"3b51df03-f27e-46f5-90c9-e0d163104a49","Type":"ContainerDied","Data":"2a629587049ca9db1f746a5b2b4961a6e29326f2076c745bb55adf1a8dc9846f"} Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.746288 4943 scope.go:117] "RemoveContainer" containerID="b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.746443 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rj5w4" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.769189 4943 scope.go:117] "RemoveContainer" containerID="4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.786968 4943 scope.go:117] "RemoveContainer" containerID="540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.840156 4943 scope.go:117] "RemoveContainer" containerID="b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca" Nov 29 08:33:03 crc kubenswrapper[4943]: E1129 08:33:03.840815 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca\": container with ID starting with b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca not found: ID does not exist" containerID="b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.840860 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca"} err="failed to get container status \"b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca\": rpc error: code = NotFound desc = could not find container \"b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca\": container with ID starting with b7af71dfe329019c55f1c3219992bddf970928e04ef44320e9b214770e9e92ca not found: ID does not exist" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.840887 4943 scope.go:117] "RemoveContainer" containerID="4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148" Nov 29 08:33:03 crc kubenswrapper[4943]: E1129 08:33:03.841318 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148\": container with ID starting with 4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148 not found: ID does not exist" 
containerID="4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.841365 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148"} err="failed to get container status \"4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148\": rpc error: code = NotFound desc = could not find container \"4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148\": container with ID starting with 4025a6e9503c1dc70268ede81e96cc114835276f0b76c16ef5799a76acfca148 not found: ID does not exist" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.841398 4943 scope.go:117] "RemoveContainer" containerID="540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0" Nov 29 08:33:03 crc kubenswrapper[4943]: E1129 08:33:03.841738 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0\": container with ID starting with 540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0 not found: ID does not exist" containerID="540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.842545 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0"} err="failed to get container status \"540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0\": rpc error: code = NotFound desc = could not find container \"540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0\": container with ID starting with 540189e81c1a9bf2ebad288b78edd13dcb5ff862bc821b6e96e8e8f651a918e0 not found: ID does not exist" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.843418 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-utilities\") pod \"3b51df03-f27e-46f5-90c9-e0d163104a49\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.843500 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-catalog-content\") pod \"3b51df03-f27e-46f5-90c9-e0d163104a49\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.843689 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89wkm\" (UniqueName: \"kubernetes.io/projected/3b51df03-f27e-46f5-90c9-e0d163104a49-kube-api-access-89wkm\") pod \"3b51df03-f27e-46f5-90c9-e0d163104a49\" (UID: \"3b51df03-f27e-46f5-90c9-e0d163104a49\") " Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.844408 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-utilities" (OuterVolumeSpecName: "utilities") pod "3b51df03-f27e-46f5-90c9-e0d163104a49" (UID: "3b51df03-f27e-46f5-90c9-e0d163104a49"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.844674 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.854892 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b51df03-f27e-46f5-90c9-e0d163104a49-kube-api-access-89wkm" (OuterVolumeSpecName: "kube-api-access-89wkm") pod "3b51df03-f27e-46f5-90c9-e0d163104a49" (UID: "3b51df03-f27e-46f5-90c9-e0d163104a49"). InnerVolumeSpecName "kube-api-access-89wkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.898306 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3b51df03-f27e-46f5-90c9-e0d163104a49" (UID: "3b51df03-f27e-46f5-90c9-e0d163104a49"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.946974 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b51df03-f27e-46f5-90c9-e0d163104a49-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:33:03 crc kubenswrapper[4943]: I1129 08:33:03.947015 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89wkm\" (UniqueName: \"kubernetes.io/projected/3b51df03-f27e-46f5-90c9-e0d163104a49-kube-api-access-89wkm\") on node \"crc\" DevicePath \"\"" Nov 29 08:33:04 crc kubenswrapper[4943]: I1129 08:33:04.086960 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rj5w4"] Nov 29 08:33:04 crc kubenswrapper[4943]: I1129 08:33:04.094388 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rj5w4"] Nov 29 08:33:05 crc kubenswrapper[4943]: I1129 08:33:05.349448 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" path="/var/lib/kubelet/pods/3b51df03-f27e-46f5-90c9-e0d163104a49/volumes" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.588154 4943 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9xmwm"] Nov 29 08:33:14 crc kubenswrapper[4943]: E1129 08:33:14.589366 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerName="extract-content" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.589381 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerName="extract-content" Nov 29 08:33:14 crc kubenswrapper[4943]: E1129 08:33:14.589412 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerName="extract-utilities" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.589420 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerName="extract-utilities" Nov 29 08:33:14 crc kubenswrapper[4943]: E1129 08:33:14.589431 4943 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerName="registry-server" Nov 29 
08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.589436 4943 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerName="registry-server" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.589667 4943 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b51df03-f27e-46f5-90c9-e0d163104a49" containerName="registry-server" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.590938 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.605300 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9xmwm"] Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.638555 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2lws\" (UniqueName: \"kubernetes.io/projected/2c197179-6191-4337-8c02-ab63c720a632-kube-api-access-g2lws\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.638706 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-utilities\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.638861 4943 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-catalog-content\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.740734 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-catalog-content\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.740898 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2lws\" (UniqueName: \"kubernetes.io/projected/2c197179-6191-4337-8c02-ab63c720a632-kube-api-access-g2lws\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.740954 4943 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-utilities\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.741717 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-utilities\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " 
pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.741863 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-catalog-content\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.768411 4943 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2lws\" (UniqueName: \"kubernetes.io/projected/2c197179-6191-4337-8c02-ab63c720a632-kube-api-access-g2lws\") pod \"certified-operators-9xmwm\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:14 crc kubenswrapper[4943]: I1129 08:33:14.908796 4943 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:15 crc kubenswrapper[4943]: I1129 08:33:15.459222 4943 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9xmwm"] Nov 29 08:33:15 crc kubenswrapper[4943]: I1129 08:33:15.862031 4943 generic.go:334] "Generic (PLEG): container finished" podID="2c197179-6191-4337-8c02-ab63c720a632" containerID="b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f" exitCode=0 Nov 29 08:33:15 crc kubenswrapper[4943]: I1129 08:33:15.862086 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xmwm" event={"ID":"2c197179-6191-4337-8c02-ab63c720a632","Type":"ContainerDied","Data":"b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f"} Nov 29 08:33:15 crc kubenswrapper[4943]: I1129 08:33:15.862111 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xmwm" event={"ID":"2c197179-6191-4337-8c02-ab63c720a632","Type":"ContainerStarted","Data":"daafad135e42e4111332d93b591a0925254cb5c803368b8bb5a867955aa006df"} Nov 29 08:33:16 crc kubenswrapper[4943]: I1129 08:33:16.870993 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xmwm" event={"ID":"2c197179-6191-4337-8c02-ab63c720a632","Type":"ContainerStarted","Data":"be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2"} Nov 29 08:33:17 crc kubenswrapper[4943]: I1129 08:33:17.900655 4943 generic.go:334] "Generic (PLEG): container finished" podID="2c197179-6191-4337-8c02-ab63c720a632" containerID="be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2" exitCode=0 Nov 29 08:33:17 crc kubenswrapper[4943]: I1129 08:33:17.900735 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xmwm" event={"ID":"2c197179-6191-4337-8c02-ab63c720a632","Type":"ContainerDied","Data":"be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2"} Nov 29 08:33:18 crc kubenswrapper[4943]: I1129 08:33:18.914225 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xmwm" event={"ID":"2c197179-6191-4337-8c02-ab63c720a632","Type":"ContainerStarted","Data":"52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8"} Nov 29 08:33:24 crc kubenswrapper[4943]: I1129 08:33:24.909470 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:24 crc 
kubenswrapper[4943]: I1129 08:33:24.910156 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:24 crc kubenswrapper[4943]: I1129 08:33:24.969333 4943 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:24 crc kubenswrapper[4943]: I1129 08:33:24.993553 4943 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9xmwm" podStartSLOduration=8.412330889 podStartE2EDuration="10.993535216s" podCreationTimestamp="2025-11-29 08:33:14 +0000 UTC" firstStartedPulling="2025-11-29 08:33:15.865345325 +0000 UTC m=+7170.795434078" lastFinishedPulling="2025-11-29 08:33:18.446549652 +0000 UTC m=+7173.376638405" observedRunningTime="2025-11-29 08:33:18.93218837 +0000 UTC m=+7173.862277123" watchObservedRunningTime="2025-11-29 08:33:24.993535216 +0000 UTC m=+7179.923623969" Nov 29 08:33:25 crc kubenswrapper[4943]: I1129 08:33:25.014986 4943 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:25 crc kubenswrapper[4943]: I1129 08:33:25.206495 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9xmwm"] Nov 29 08:33:26 crc kubenswrapper[4943]: I1129 08:33:26.982552 4943 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9xmwm" podUID="2c197179-6191-4337-8c02-ab63c720a632" containerName="registry-server" containerID="cri-o://52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8" gracePeriod=2 Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.429372 4943 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.614587 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2lws\" (UniqueName: \"kubernetes.io/projected/2c197179-6191-4337-8c02-ab63c720a632-kube-api-access-g2lws\") pod \"2c197179-6191-4337-8c02-ab63c720a632\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.614762 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-utilities\") pod \"2c197179-6191-4337-8c02-ab63c720a632\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.614790 4943 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-catalog-content\") pod \"2c197179-6191-4337-8c02-ab63c720a632\" (UID: \"2c197179-6191-4337-8c02-ab63c720a632\") " Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.616126 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-utilities" (OuterVolumeSpecName: "utilities") pod "2c197179-6191-4337-8c02-ab63c720a632" (UID: "2c197179-6191-4337-8c02-ab63c720a632"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.622252 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c197179-6191-4337-8c02-ab63c720a632-kube-api-access-g2lws" (OuterVolumeSpecName: "kube-api-access-g2lws") pod "2c197179-6191-4337-8c02-ab63c720a632" (UID: "2c197179-6191-4337-8c02-ab63c720a632"). InnerVolumeSpecName "kube-api-access-g2lws". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.672889 4943 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c197179-6191-4337-8c02-ab63c720a632" (UID: "2c197179-6191-4337-8c02-ab63c720a632"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.717062 4943 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2lws\" (UniqueName: \"kubernetes.io/projected/2c197179-6191-4337-8c02-ab63c720a632-kube-api-access-g2lws\") on node \"crc\" DevicePath \"\"" Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.717104 4943 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.717118 4943 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c197179-6191-4337-8c02-ab63c720a632-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.994902 4943 generic.go:334] "Generic (PLEG): container finished" podID="2c197179-6191-4337-8c02-ab63c720a632" containerID="52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8" exitCode=0 Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.994987 4943 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9xmwm" Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.995111 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xmwm" event={"ID":"2c197179-6191-4337-8c02-ab63c720a632","Type":"ContainerDied","Data":"52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8"} Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.995448 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xmwm" event={"ID":"2c197179-6191-4337-8c02-ab63c720a632","Type":"ContainerDied","Data":"daafad135e42e4111332d93b591a0925254cb5c803368b8bb5a867955aa006df"} Nov 29 08:33:27 crc kubenswrapper[4943]: I1129 08:33:27.995506 4943 scope.go:117] "RemoveContainer" containerID="52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8" Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.032539 4943 scope.go:117] "RemoveContainer" containerID="be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2" Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.050600 4943 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9xmwm"] Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.058860 4943 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9xmwm"] Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.063935 4943 scope.go:117] "RemoveContainer" containerID="b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f" Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.111632 4943 scope.go:117] "RemoveContainer" containerID="52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8" Nov 29 08:33:28 crc kubenswrapper[4943]: E1129 08:33:28.112001 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8\": container with ID starting with 52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8 not found: ID does not exist" containerID="52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8" Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.112030 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8"} err="failed to get container status \"52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8\": rpc error: code = NotFound desc = could not find container \"52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8\": container with ID starting with 52eb17e44ecb0aad2ca10e72a75590506a531621a5127b58098bfd9d1ccc15c8 not found: ID does not exist" Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.112051 4943 scope.go:117] "RemoveContainer" containerID="be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2" Nov 29 08:33:28 crc kubenswrapper[4943]: E1129 08:33:28.112469 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2\": container with ID starting with be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2 not found: ID does not exist" containerID="be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2" Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.112509 4943 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2"} err="failed to get container status \"be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2\": rpc error: code = NotFound desc = could not find container \"be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2\": container with ID starting with be371b60e21b2cb375bd593e3f2e52ef9b1f7b57cfbfc84935b79846b2db37b2 not found: ID does not exist" Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.112534 4943 scope.go:117] "RemoveContainer" containerID="b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f" Nov 29 08:33:28 crc kubenswrapper[4943]: E1129 08:33:28.112872 4943 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f\": container with ID starting with b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f not found: ID does not exist" containerID="b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f" Nov 29 08:33:28 crc kubenswrapper[4943]: I1129 08:33:28.112892 4943 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f"} err="failed to get container status \"b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f\": rpc error: code = NotFound desc = could not find container \"b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f\": container with ID starting with b024f96d75b55b56c030f6e885ffe5218e89e9689466f9737d6fa900bd8f8d1f not found: ID does not exist" Nov 29 08:33:29 crc kubenswrapper[4943]: I1129 08:33:29.337816 4943 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c197179-6191-4337-8c02-ab63c720a632" path="/var/lib/kubelet/pods/2c197179-6191-4337-8c02-ab63c720a632/volumes" Nov 29 08:33:32 crc kubenswrapper[4943]: I1129 08:33:32.613370 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:33:32 crc kubenswrapper[4943]: I1129 08:33:32.614010 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 08:33:32 crc kubenswrapper[4943]: I1129 08:33:32.614059 4943 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" Nov 29 08:33:32 crc kubenswrapper[4943]: I1129 08:33:32.614910 4943 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d5d71c8e9010c3bed77d184f510d4db62b7c14350aac76759cbc2a0fb01307a"} pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 08:33:32 crc kubenswrapper[4943]: I1129 08:33:32.614971 4943 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" containerID="cri-o://9d5d71c8e9010c3bed77d184f510d4db62b7c14350aac76759cbc2a0fb01307a" gracePeriod=600 Nov 29 08:33:33 crc kubenswrapper[4943]: I1129 08:33:33.042238 4943 generic.go:334] "Generic (PLEG): container finished" podID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerID="9d5d71c8e9010c3bed77d184f510d4db62b7c14350aac76759cbc2a0fb01307a" exitCode=0 Nov 29 08:33:33 crc kubenswrapper[4943]: I1129 08:33:33.042391 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerDied","Data":"9d5d71c8e9010c3bed77d184f510d4db62b7c14350aac76759cbc2a0fb01307a"} Nov 29 08:33:33 crc kubenswrapper[4943]: I1129 08:33:33.042628 4943 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" event={"ID":"9452a4f7-8768-4190-b544-50f80bc5ebf6","Type":"ContainerStarted","Data":"a7bdf7d36f78b4c4a9e6ebb1631bd142753b2d4f1be0195cd89471e922463064"} Nov 29 08:33:33 crc kubenswrapper[4943]: I1129 08:33:33.042678 4943 scope.go:117] "RemoveContainer" containerID="4a85d580600e1bd316f9ece2b5eae628965cbe59e148e92f6b7763a53dee3b9a" Nov 29 08:35:32 crc kubenswrapper[4943]: I1129 08:35:32.613947 4943 patch_prober.go:28] interesting pod/machine-config-daemon-f4gf7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 08:35:32 crc kubenswrapper[4943]: I1129 08:35:32.614499 4943 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f4gf7" podUID="9452a4f7-8768-4190-b544-50f80bc5ebf6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515112530167024446 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015112530167017363 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015112511361016501 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015112511361015451 5ustar corecore